/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
#include <uapi/linux/kfd_ioctl.h>

/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)

/* Userptr restore delay, just long enough to allow consecutive VM
 * changes to accumulate
 */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1

/* Impose limit on how much memory KFD can use */
static struct {
	uint64_t max_system_mem_limit;
	uint64_t max_ttm_mem_limit;
	int64_t system_mem_used;
	int64_t ttm_mem_used;
	spinlock_t mem_limit_lock;
} kfd_mem_limit;

/* Struct used for amdgpu_amdkfd_bo_validate */
struct amdgpu_vm_parser {
	uint32_t domain;
	bool wait;
};

static const char * const domain_bit_to_string[] = {
	"CPU",
	"GTT",
	"VRAM",
	"GDS",
	"GWS",
	"OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]

static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);


static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
		struct kgd_mem *mem)
{
	struct kfd_bo_va_list *entry;

	list_for_each_entry(entry, &mem->bo_va_list, bo_list)
		if (entry->bo_va->base.vm == avm)
			return false;

	return true;
}

/* Set memory usage limits. Currently, the limits are
 * System (TTM + userptr) memory - 15/16th System RAM
 * TTM memory - 3/8th System RAM
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
	struct sysinfo si;
	uint64_t mem;

	si_meminfo(&si);
	mem = si.totalram - si.totalhigh;
	mem *= si.mem_unit;

	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
		(kfd_mem_limit.max_system_mem_limit >> 20),
		(kfd_mem_limit.max_ttm_mem_limit >> 20));
}
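
/*
 * Worked example (illustrative, assuming 16 GiB of usable system RAM):
 * max_system_mem_limit = 16G - 16G/16 = 15 GiB (15/16th of RAM), and
 * max_ttm_mem_limit = 16G/2 - 16G/8 = 6 GiB (3/8th of RAM).
 */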

/* Estimate page table size needed to represent a given memory size
 *
 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
 * for 2MB pages for TLB efficiency. However, small allocations and
 * fragmented system memory still need some 4KB pages. We choose a
 * compromise that should work in most cases without reserving too
 * much memory for page tables unnecessarily (factor 16K, >> 14).
 */
#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
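
/*
 * Example (illustrative): for 64 GiB of total memory,
 * ESTIMATE_PT_SIZE(64ULL << 30) reserves 4 MiB for page tables,
 * between the all-4KB-page cost (128 MiB, >> 9) and the all-2MB-page
 * cost (256 KiB, >> 18).
 */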

static size_t amdgpu_amdkfd_acc_size(uint64_t size)
{
	size >>= PAGE_SHIFT;
	size *= sizeof(dma_addr_t) + sizeof(void *);

	return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
		__roundup_pow_of_two(sizeof(struct ttm_tt)) +
		PAGE_ALIGN(size);
}

static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 domain, bool sg)
{
	uint64_t reserved_for_pt =
		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
	size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
	int ret = 0;

	acc_size = amdgpu_amdkfd_acc_size(size);

	vram_needed = 0;
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		/* TTM GTT memory */
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size + size;
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
		/* Userptr */
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size;
	} else {
		/* VRAM and SG */
		system_mem_needed = acc_size;
		ttm_mem_needed = acc_size;
		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
			vram_needed = size;
	}

	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if (kfd_mem_limit.system_mem_used + system_mem_needed >
	    kfd_mem_limit.max_system_mem_limit)
		pr_debug("Set no_system_mem_limit=1 if using shared memory\n");

	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
	     kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
	     kfd_mem_limit.max_ttm_mem_limit) ||
	    (adev->kfd.vram_used + vram_needed >
	     adev->gmc.real_vram_size - reserved_for_pt)) {
		ret = -ENOMEM;
	} else {
		kfd_mem_limit.system_mem_used += system_mem_needed;
		kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
		adev->kfd.vram_used += vram_needed;
	}

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
	return ret;
}
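
/*
 * Accounting summary (derived from the checks above): a GTT BO of size
 * S charges S plus bookkeeping overhead against both the system and
 * TTM limits; a userptr BO charges S against the system limit only,
 * since its pages are user memory rather than TTM allocations; VRAM
 * and SG BOs charge only their bookkeeping overhead, with VRAM
 * additionally checked against real_vram_size minus the page-table
 * reserve.
 */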

static void unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 domain, bool sg)
{
	size_t acc_size;

	acc_size = amdgpu_amdkfd_acc_size(size);

	spin_lock(&kfd_mem_limit.mem_limit_lock);
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= acc_size;
	} else {
		kfd_mem_limit.system_mem_used -= acc_size;
		kfd_mem_limit.ttm_mem_used -= acc_size;
		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
			adev->kfd.vram_used -= size;
			WARN_ONCE(adev->kfd.vram_used < 0,
				  "kfd VRAM memory accounting unbalanced");
		}
	}
	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
		  "kfd system memory accounting unbalanced");
	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
		  "kfd TTM memory accounting unbalanced");

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
}

void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	u32 domain = bo->preferred_domains;
	bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);

	if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
		domain = AMDGPU_GEM_DOMAIN_CPU;
		sg = false;
	}

	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
}


/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
 * reservation object.
 *
 * @bo: [IN] Remove eviction fence(s) from this BO
 * @ef: [IN] This eviction fence is removed if it
 *  is present in the shared list.
 *
 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
 */
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
					struct amdgpu_amdkfd_fence *ef)
{
	struct dma_resv *resv = bo->tbo.base.resv;
	struct dma_resv_list *old, *new;
	unsigned int i, j, k;

	if (!ef)
		return -EINVAL;

	old = dma_resv_get_list(resv);
	if (!old)
		return 0;

	new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	/* Go through all the shared fences in the reservation object and sort
	 * the interesting ones to the end of the list.
	 */
	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(old->shared[i],
					      dma_resv_held(resv));

		if (f->context == ef->base.context)
			RCU_INIT_POINTER(new->shared[--j], f);
		else
			RCU_INIT_POINTER(new->shared[k++], f);
	}
	new->shared_max = old->shared_max;
	new->shared_count = k;

	/* Install the new fence list, seqcount provides the barriers */
	write_seqcount_begin(&resv->seq);
	RCU_INIT_POINTER(resv->fence, new);
	write_seqcount_end(&resv->seq);

	/* Drop the references to the removed fences */
	for (i = j; i < old->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(new->shared[i],
					      dma_resv_held(resv));
		dma_fence_put(f);
	}
	kfree_rcu(old, rcu);

	return 0;
}
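
/*
 * Illustrative example of the partition above: with shared fences
 * [A, E1, B, E2] where E1/E2 belong to the eviction fence context,
 * the loop builds new = [A, B, E2, E1] with shared_count = 2, so A
 * and B remain visible while E1 and E2 sit past the end of the list
 * and have their references dropped by the second loop.
 */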

int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
	struct amdgpu_bo *root = bo;
	struct amdgpu_vm_bo_base *vm_bo;
	struct amdgpu_vm *vm;
	struct amdkfd_process_info *info;
	struct amdgpu_amdkfd_fence *ef;
	int ret;

	/* we can always get vm_bo from root PD bo.*/
	while (root->parent)
		root = root->parent;

	vm_bo = root->vm_bo;
	if (!vm_bo)
		return 0;

	vm = vm_bo->vm;
	if (!vm)
		return 0;

	info = vm->process_info;
	if (!info || !info->eviction_fence)
		return 0;

	ef = container_of(dma_fence_get(&info->eviction_fence->base),
			struct amdgpu_amdkfd_fence, base);

	BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
	ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
	dma_resv_unlock(bo->tbo.base.resv);

	dma_fence_put(&ef->base);
	return ret;
}

static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
				     bool wait)
{
	struct ttm_operation_ctx ctx = { false, false };
	int ret;

	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
		 "Called with userptr BO"))
		return -EINVAL;

	amdgpu_bo_placement_from_domain(bo, domain);

	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto validate_fail;
	if (wait)
		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);

validate_fail:
	return ret;
}

static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_vm_parser *p = param;

	return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
}

/* vm_validate_pt_pd_bos - Validate page table and directory BOs
 *
 * Page directories are not updated here because huge page handling
 * during page table updates can invalidate page directory entries
 * again. Page directories are only updated after updating page
 * tables.
 */
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
{
	struct amdgpu_bo *pd = vm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	struct amdgpu_vm_parser param;
	int ret;

	param.domain = AMDGPU_GEM_DOMAIN_VRAM;
	param.wait = false;

	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
					&param);
	if (ret) {
		pr_err("failed to validate PT BOs\n");
		return ret;
	}

	ret = amdgpu_amdkfd_validate(&param, pd);
	if (ret) {
		pr_err("failed to validate PD\n");
		return ret;
	}

	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);

	if (vm->use_cpu_for_update) {
		ret = amdgpu_bo_kmap(pd, NULL);
		if (ret) {
			pr_err("failed to kmap PD, ret=%d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo *pd = vm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_update_pdes(adev, vm, false);
	if (ret)
		return ret;

	return amdgpu_sync_fence(sync, vm->last_update);
}

static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
{
	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
	bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
	uint32_t mapping_flags;

	mapping_flags = AMDGPU_VM_PAGE_READABLE;
	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
			if (bo_adev == adev)
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
			else
				mapping_flags |= AMDGPU_VM_MTYPE_UC;
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	default:
		mapping_flags |= coherent ?
			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
	}

	return amdgpu_gem_va_map_flags(adev, mapping_flags);
}
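
/*
 * Example of the Arcturus cases above: non-coherent VRAM mapped on the
 * GPU that owns it gets MTYPE_RW (cached read-write), the same VRAM
 * mapped from a peer GPU gets MTYPE_UC (uncached), and coherent local
 * VRAM gets MTYPE_CC so CPU and GPU caches stay consistent.
 */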

/* add_bo_to_vm - Add a BO to a VM
 *
 * Everything that needs to be done only once when a BO is first added
 * to a VM. It can later be mapped and unmapped many times without
 * repeating these steps.
 *
 * 1. Allocate and initialize BO VA entry data structure
 * 2. Add BO to the VM
 * 3. Determine ASIC-specific PTE flags
 * 4. Alloc page tables and directories if needed
 * 4a. Validate new page tables and directories
 */
static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
		struct amdgpu_vm *vm, bool is_aql,
		struct kfd_bo_va_list **p_bo_va_entry)
{
	int ret;
	struct kfd_bo_va_list *bo_va_entry;
	struct amdgpu_bo *bo = mem->bo;
	uint64_t va = mem->va;
	struct list_head *list_bo_va = &mem->bo_va_list;
	unsigned long bo_size = bo->tbo.base.size;

	if (!va) {
		pr_err("Invalid VA when adding BO to VM\n");
		return -EINVAL;
	}

	if (is_aql)
		va += bo_size;

	bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
	if (!bo_va_entry)
		return -ENOMEM;

	pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
			va + bo_size, vm);

	/* Add BO to VM internal data structures */
	bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!bo_va_entry->bo_va) {
		ret = -EINVAL;
		pr_err("Failed to add BO object to VM. ret == %d\n",
				ret);
		goto err_vmadd;
	}

	bo_va_entry->va = va;
	bo_va_entry->pte_flags = get_pte_flags(adev, mem);
	bo_va_entry->kgd_dev = (void *)adev;
	list_add(&bo_va_entry->bo_list, list_bo_va);

	if (p_bo_va_entry)
		*p_bo_va_entry = bo_va_entry;

	/* Allocate and validate page tables if needed */
	ret = vm_validate_pt_pd_bos(vm);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto err_alloc_pts;
	}

	return 0;

err_alloc_pts:
	amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
	list_del(&bo_va_entry->bo_list);
err_vmadd:
	kfree(bo_va_entry);
	return ret;
}

static void remove_bo_from_vm(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry, unsigned long size)
{
	pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
			entry->va,
			entry->va + size, entry);
	amdgpu_vm_bo_rmv(adev, entry->bo_va);
	list_del(&entry->bo_list);
	kfree(entry);
}

static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
				struct amdkfd_process_info *process_info,
				bool userptr)
{
	struct ttm_validate_buffer *entry = &mem->validate_list;
	struct amdgpu_bo *bo = mem->bo;

	INIT_LIST_HEAD(&entry->head);
	entry->num_shared = 1;
	entry->bo = &bo->tbo;
	mutex_lock(&process_info->lock);
	if (userptr)
		list_add_tail(&entry->head, &process_info->userptr_valid_list);
	else
		list_add_tail(&entry->head, &process_info->kfd_bo_list);
	mutex_unlock(&process_info->lock);
}

static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
		struct amdkfd_process_info *process_info)
{
	struct ttm_validate_buffer *bo_list_entry;

	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);
}

/* Initializes user pages. It registers the MMU notifier and validates
 * the userptr BO in the GTT domain.
 *
 * The BO must already be on the userptr_valid_list. Otherwise an
 * eviction and restore may happen that leaves the new BO unmapped
 * with the user mode queues running.
 *
 * Takes the process_info->lock to protect against concurrent restore
 * workers.
 *
 * Returns 0 for success, negative errno for errors.
 */
static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	struct amdgpu_bo *bo = mem->bo;
	struct ttm_operation_ctx ctx = { true, false };
	int ret = 0;

	mutex_lock(&process_info->lock);

	ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
	if (ret) {
		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
		goto out;
	}

	ret = amdgpu_mn_register(bo, user_addr);
	if (ret) {
		pr_err("%s: Failed to register MMU notifier: %d\n",
		       __func__, ret);
		goto out;
	}

	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
	if (ret) {
		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
		goto unregister_out;
	}

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("%s: Failed to reserve BO\n", __func__);
		goto release_out;
	}
	amdgpu_bo_placement_from_domain(bo, mem->domain);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		pr_err("%s: failed to validate BO\n", __func__);
	amdgpu_bo_unreserve(bo);

release_out:
	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
unregister_out:
	if (ret)
		amdgpu_mn_unregister(bo);
out:
	mutex_unlock(&process_info->lock);
	return ret;
}

/* Reserving a BO and its page table BOs must happen atomically to
 * avoid deadlocks. Some operations update multiple VMs at once. Track
 * all the reservation info in a context structure. Optionally a sync
 * object can track VM updates.
 */
struct bo_vm_reservation_context {
	struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
	unsigned int n_vms;		    /* Number of VMs reserved */
	struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries */
	struct ww_acquire_ctx ticket;	    /* Reservation ticket */
	struct list_head list, duplicates;  /* BO lists */
	struct amdgpu_sync *sync;	    /* Pointer to sync object */
	bool reserved;			    /* Whether BOs are reserved */
};

enum bo_vm_match {
	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
	BO_VM_MAPPED,		/* Match VMs where a BO is mapped */
	BO_VM_ALL,		/* Match all VMs a BO was added to */
};
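
/*
 * Typical usage sketch (illustrative only): callers pair a reserve
 * with an unreserve around the VM update they need, e.g.
 *
 *	struct bo_vm_reservation_context ctx;
 *
 *	ret = reserve_bo_and_vm(mem, vm, &ctx);
 *	if (ret)
 *		return ret;
 *	... map/unmap, update page tables, add fences ...
 *	ret = unreserve_bo_and_vms(&ctx, true, false);
 *
 * Passing wait = true makes the unreserve also wait for the pending
 * VM updates tracked in ctx.sync.
 */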

/**
 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
 * @mem: KFD BO structure.
 * @vm: the VM to reserve.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 */
static int reserve_bo_and_vm(struct kgd_mem *mem,
			      struct amdgpu_vm *vm,
			      struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	int ret;

	WARN_ON(!vm);

	ctx->reserved = false;
	ctx->n_vms = 1;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
	if (!ctx->vm_pd)
		return -ENOMEM;

	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.num_shared = 1;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (ret) {
		pr_err("Failed to reserve buffers in ttm.\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
		return ret;
	}

	ctx->reserved = true;
	return 0;
}

/**
 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, all VMs associated with the BO
 * are reserved. Otherwise, only the given VM is.
 * @map_type: the mapping status that will be used to filter the VMs.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 *
 * Returns 0 for success, negative for failure.
 */
static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
				struct amdgpu_vm *vm, enum bo_vm_match map_type,
				struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	struct kfd_bo_va_list *entry;
	unsigned int i;
	int ret;

	ctx->reserved = false;
	ctx->n_vms = 0;
	ctx->vm_pd = NULL;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		ctx->n_vms++;
	}

	if (ctx->n_vms != 0) {
		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
				     GFP_KERNEL);
		if (!ctx->vm_pd)
			return -ENOMEM;
	}

	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.num_shared = 1;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	i = 0;
	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
				&ctx->vm_pd[i]);
		i++;
	}

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (ret) {
		pr_err("Failed to reserve buffers in ttm.\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
		return ret;
	}

	ctx->reserved = true;
	return 0;
}

/**
 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
 * @ctx: Reservation context to unreserve
 * @wait: Optionally wait for a sync object representing pending VM updates
 * @intr: Whether the wait is interruptible
 *
 * Also frees any resources allocated in
 * reserve_bo_and_(cond_)vm(s). Returns the status from
 * amdgpu_sync_wait.
 */
static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
				 bool wait, bool intr)
{
	int ret = 0;

	if (wait)
		ret = amdgpu_sync_wait(ctx->sync, intr);

	if (ctx->reserved)
		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
	kfree(ctx->vm_pd);

	ctx->sync = NULL;

	ctx->reserved = false;
	ctx->vm_pd = NULL;

	return ret;
}

static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
				struct kfd_bo_va_list *entry,
				struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_vm *vm = bo_va->base.vm;

	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);

	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);

	amdgpu_sync_fence(sync, bo_va->last_pt_update);

	return 0;
}

static int update_gpuvm_pte(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry,
		struct amdgpu_sync *sync)
{
	int ret;
	struct amdgpu_bo_va *bo_va = entry->bo_va;

	/* Update the page tables */
	ret = amdgpu_vm_bo_update(adev, bo_va, false);
	if (ret) {
		pr_err("amdgpu_vm_bo_update failed\n");
		return ret;
	}

	return amdgpu_sync_fence(sync, bo_va->last_pt_update);
}

static int map_bo_to_gpuvm(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
		bool no_update_pte)
{
	int ret;

	/* Set virtual address for the allocation */
	ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
			       amdgpu_bo_size(entry->bo_va->base.bo),
			       entry->pte_flags);
	if (ret) {
		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
				entry->va, ret);
		return ret;
	}

	if (no_update_pte)
		return 0;

	ret = update_gpuvm_pte(adev, entry, sync);
	if (ret) {
		pr_err("update_gpuvm_pte() failed\n");
		goto update_gpuvm_pte_failed;
	}

	return 0;

update_gpuvm_pte_failed:
	unmap_bo_from_gpuvm(adev, entry, sync);
	return ret;
}

static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
{
	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);

	if (!sg)
		return NULL;
	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
		kfree(sg);
		return NULL;
	}
	sg->sgl->dma_address = addr;
	sg->sgl->length = size;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
	sg->sgl->dma_length = size;
#endif
	return sg;
}
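
/*
 * Note: the single-entry sg table built above describes an already
 * DMA-addressable region (a doorbell or MMIO page), so dma_address is
 * filled in directly instead of going through the usual DMA-mapping
 * path for system pages.
 */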

static int process_validate_vms(struct amdkfd_process_info *process_info)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_validate_pt_pd_bos(peer_vm);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
				 struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *pd = peer_vm->root.base.bo;

		ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
				       AMDGPU_SYNC_NE_OWNER,
				       AMDGPU_FENCE_OWNER_KFD);
		if (ret)
			return ret;
	}

	return 0;
}
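
/*
 * AMDGPU_SYNC_NE_OWNER with AMDGPU_FENCE_OWNER_KFD collects only the
 * PD fences that do not belong to KFD itself (e.g. buffer moves), so
 * the restore path waits for foreign work without waiting on its own
 * eviction fences.
 */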

static int process_update_pds(struct amdkfd_process_info *process_info,
			      struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_update_pds(peer_vm, sync);
		if (ret)
			return ret;
	}

	return 0;
}

static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
		       struct dma_fence **ef)
{
	struct amdkfd_process_info *info = NULL;
	int ret;

	if (!*process_info) {
		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		mutex_init(&info->lock);
		INIT_LIST_HEAD(&info->vm_list_head);
		INIT_LIST_HEAD(&info->kfd_bo_list);
		INIT_LIST_HEAD(&info->userptr_valid_list);
		INIT_LIST_HEAD(&info->userptr_inval_list);

		info->eviction_fence =
			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
						   current->mm);
		if (!info->eviction_fence) {
			pr_err("Failed to create eviction fence\n");
			ret = -ENOMEM;
			goto create_evict_fence_fail;
		}

		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
		atomic_set(&info->evicted_bos, 0);
		INIT_DELAYED_WORK(&info->restore_userptr_work,
				  amdgpu_amdkfd_restore_userptr_worker);

		*process_info = info;
		*ef = dma_fence_get(&info->eviction_fence->base);
	}

	vm->process_info = *process_info;

	/* Validate page directory and attach eviction fence */
	ret = amdgpu_bo_reserve(vm->root.base.bo, true);
	if (ret)
		goto reserve_pd_fail;
	ret = vm_validate_pt_pd_bos(vm);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto validate_pd_fail;
	}
	ret = amdgpu_bo_sync_wait(vm->root.base.bo,
				  AMDGPU_FENCE_OWNER_KFD, false);
	if (ret)
		goto wait_pd_fail;
	ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	amdgpu_bo_fence(vm->root.base.bo,
			&vm->process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(vm->root.base.bo);

	/* Update process info */
	mutex_lock(&vm->process_info->lock);
	list_add_tail(&vm->vm_list_node,
			&(vm->process_info->vm_list_head));
	vm->process_info->n_vms++;
	mutex_unlock(&vm->process_info->lock);

	return 0;

reserve_shared_fail:
wait_pd_fail:
validate_pd_fail:
	amdgpu_bo_unreserve(vm->root.base.bo);
reserve_pd_fail:
	vm->process_info = NULL;
	if (info) {
		/* Two fence references: one in info and one in *ef */
		dma_fence_put(&info->eviction_fence->base);
		dma_fence_put(*ef);
		*ef = NULL;
		*process_info = NULL;
		put_pid(info->pid);
create_evict_fence_fail:
		mutex_destroy(&info->lock);
		kfree(info);
	}
	return ret;
}

int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid,
					  void **vm, void **process_info,
					  struct dma_fence **ef)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *new_vm;
	int ret;

	new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
	if (!new_vm)
		return -ENOMEM;

	/* Initialize AMDGPU part of the VM */
	ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
	if (ret) {
		pr_err("Failed init vm ret %d\n", ret);
		goto amdgpu_vm_init_fail;
	}

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(new_vm, process_info, ef);
	if (ret)
		goto init_kfd_vm_fail;

	*vm = (void *) new_vm;

	return 0;

init_kfd_vm_fail:
	amdgpu_vm_fini(adev, new_vm);
amdgpu_vm_init_fail:
	kfree(new_vm);
	return ret;
}

int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
					   struct file *filp, u32 pasid,
					   void **vm, void **process_info,
					   struct dma_fence **ef)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct drm_file *drm_priv = filp->private_data;
	struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
	struct amdgpu_vm *avm = &drv_priv->vm;
	int ret;

	/* Already a compute VM? */
	if (avm->process_info)
		return -EINVAL;

	/* Convert VM into a compute VM */
	ret = amdgpu_vm_make_compute(adev, avm, pasid);
	if (ret)
		return ret;

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(avm, process_info, ef);
	if (ret)
		return ret;

	*vm = (void *)avm;

	return 0;
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdkfd_process_info *process_info = vm->process_info;
	struct amdgpu_bo *pd = vm->root.base.bo;

	if (!process_info)
		return;

	/* Release eviction fence from PD */
	amdgpu_bo_reserve(pd, false);
	amdgpu_bo_fence(pd, NULL, false);
	amdgpu_bo_unreserve(pd);

	/* Update process info */
	mutex_lock(&process_info->lock);
	process_info->n_vms--;
	list_del(&vm->vm_list_node);
	mutex_unlock(&process_info->lock);

	vm->process_info = NULL;

	/* Release per-process resources when last compute VM is destroyed */
	if (!process_info->n_vms) {
		WARN_ON(!list_empty(&process_info->kfd_bo_list));
		WARN_ON(!list_empty(&process_info->userptr_valid_list));
		WARN_ON(!list_empty(&process_info->userptr_inval_list));

		dma_fence_put(&process_info->eviction_fence->base);
		cancel_delayed_work_sync(&process_info->restore_userptr_work);
		put_pid(process_info->pid);
		mutex_destroy(&process_info->lock);
		kfree(process_info);
	}
}

void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (WARN_ON(!kgd || !vm))
		return;

	pr_debug("Destroying process vm %p\n", vm);

	/* Release the VM context */
	amdgpu_vm_fini(adev, avm);
	kfree(vm);
}

void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (WARN_ON(!kgd || !vm))
		return;

	pr_debug("Releasing process vm %p\n", vm);

	/* The original pasid of the amdgpu vm has already been
	 * released when the vm was converted to a compute vm.
	 * The current pasid is managed by kfd and will be
	 * released on kfd process destroy. Set amdgpu pasid
	 * to 0 to avoid duplicate release.
	 */
	amdgpu_vm_release_compute(adev, avm);
}

uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
{
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	struct amdgpu_bo *pd = avm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);

	if (adev->asic_type < CHIP_VEGA10)
		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
	return avm->pd_phys_addr;
}
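
/*
 * Background note (assumption about the register format): ASICs older
 * than Vega10 program the VMID page-table base as a page frame number,
 * hence the AMDGPU_GPU_PAGE_SHIFT above, while GFXv9 and later consume
 * the full PD address produced by amdgpu_gmc_pd_addr(), which may
 * include control bits.
 */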

int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct kgd_dev *kgd, uint64_t va, uint64_t size,
		void *vm, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct sg_table *sg = NULL;
	uint64_t user_addr = 0;
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	u32 domain, alloc_domain;
	u64 alloc_flags;
	int ret;

	/*
	 * Check on which domain to allocate BO
	 */
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
		alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
		alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
			AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_flags = 0;
	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		alloc_flags = 0;
		if (!offset || !*offset)
			return -EINVAL;
		user_addr = untagged_addr(*offset);
	} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
			KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		bo_type = ttm_bo_type_sg;
		alloc_flags = 0;
		if (size > UINT_MAX)
			return -EINVAL;
		sg = create_doorbell_sg(*offset, size);
		if (!sg)
			return -ENOMEM;
	} else {
		return -EINVAL;
	}

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem) {
		ret = -ENOMEM;
		goto err;
	}
	INIT_LIST_HEAD(&(*mem)->bo_va_list);
	mutex_init(&(*mem)->lock);
	(*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);

	/* Workaround for AQL queue wraparound bug. Map the same
	 * memory twice. That means we only actually allocate half
	 * the memory.
	 */
	if ((*mem)->aql_queue)
		size = size >> 1;
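
	/* Illustration: for an AQL queue the caller requests 2N bytes;
	 * only N bytes are allocated, and add_bo_to_vm() maps the BO a
	 * second time at va + N (see the is_aql path), so a fetch that
	 * wraps past the end of the ring lands back at its start.
	 */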

	(*mem)->alloc_flags = flags;

	amdgpu_sync_create(&(*mem)->sync);

	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
	if (ret) {
		pr_debug("Insufficient memory\n");
		goto err_reserve_limit;
	}

	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
			va, size, domain_string(alloc_domain));

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = 1;
	bp.domain = alloc_domain;
	bp.flags = alloc_flags;
	bp.type = bo_type;
	bp.resv = NULL;
	ret = amdgpu_bo_create(adev, &bp, &bo);
	if (ret) {
		pr_debug("Failed to create BO on domain %s. ret %d\n",
				domain_string(alloc_domain), ret);
		goto err_bo_create;
	}
	if (bo_type == ttm_bo_type_sg) {
		bo->tbo.sg = sg;
		bo->tbo.ttm->sg = sg;
	}
	bo->kfd_bo = *mem;
	(*mem)->bo = bo;
	if (user_addr)
		bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;

	(*mem)->va = va;
	(*mem)->domain = domain;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);

	if (user_addr) {
		ret = init_user_pages(*mem, user_addr);
		if (ret)
			goto allocate_init_user_pages_failed;
	}

	if (offset)
		*offset = amdgpu_bo_mmap_offset(bo);

	return 0;

allocate_init_user_pages_failed:
	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
	amdgpu_bo_unref(&bo);
	/* Don't unreserve system mem limit twice */
	goto err_reserve_limit;
err_bo_create:
	unreserve_mem_limit(adev, size, alloc_domain, !!sg);
err_reserve_limit:
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
err:
	if (sg) {
		sg_free_table(sg);
		kfree(sg);
	}
	return ret;
}
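
/*
 * Note on the error paths above (an assumption about the BO release
 * path, not defined in this file): once amdgpu_bo_create() has
 * succeeded, dropping the last BO reference is expected to return the
 * accounting via amdgpu_amdkfd_unreserve_memory_limit(), which is why
 * allocate_init_user_pages_failed skips the explicit
 * unreserve_mem_limit() call.
 */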

int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	unsigned long bo_size = mem->bo->tbo.base.size;
	struct kfd_bo_va_list *entry, *tmp;
	struct bo_vm_reservation_context ctx;
	struct ttm_validate_buffer *bo_list_entry;
	unsigned int mapped_to_gpu_memory;
	int ret;
	bool is_imported = false;

	mutex_lock(&mem->lock);
	mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
	is_imported = mem->is_imported;
	mutex_unlock(&mem->lock);
	/* lock is not needed after this, since mem is unused and will
	 * be freed anyway
	 */

	if (mapped_to_gpu_memory > 0) {
		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
				mem->va, bo_size);
		return -EBUSY;
	}

	/* Make sure restore workers don't access the BO any more */
	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);

	/* No more MMU notifiers */
	amdgpu_mn_unregister(mem->bo);

	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
	if (unlikely(ret))
		return ret;

	/* The eviction fence should be removed by the last unmap.
	 * TODO: Log an error condition if the bo still has the eviction fence
	 * attached
	 */
	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
					process_info->eviction_fence);
	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
		mem->va + bo_size * (1 + mem->aql_queue));

	/* Remove from VM internal data structures */
	list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
		remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
				entry, bo_size);

	ret = unreserve_bo_and_vms(&ctx, false, false);

	/* Free the sync object */
	amdgpu_sync_free(&mem->sync);

	/* If the SG is not NULL, it's one we created for a doorbell or mmio
	 * remap BO. We need to free it.
	 */
	if (mem->bo->tbo.sg) {
		sg_free_table(mem->bo->tbo.sg);
		kfree(mem->bo->tbo.sg);
	}

	/* Update the size of the BO being freed if it was allocated from
	 * VRAM and is not imported.
	 */
	if (size) {
		if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
		    (!is_imported))
			*size = bo_size;
		else
			*size = 0;
	}

	/* Free the BO */
	drm_gem_object_put(&mem->bo->tbo.base);
	mutex_destroy(&mem->lock);
	kfree(mem);

	return ret;
}

int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	int ret;
	struct amdgpu_bo *bo;
	uint32_t domain;
	struct kfd_bo_va_list *entry;
	struct bo_vm_reservation_context ctx;
	struct kfd_bo_va_list *bo_va_entry = NULL;
	struct kfd_bo_va_list *bo_va_entry_aql = NULL;
	unsigned long bo_size;
	bool is_invalid_userptr = false;

	bo = mem->bo;
	if (!bo) {
		pr_err("Invalid BO when mapping memory to GPU\n");
		return -EINVAL;
	}

	/* Make sure restore is not running concurrently. Since we
	 * don't map invalid userptr BOs, we rely on the next restore
	 * worker to do the mapping
	 */
	mutex_lock(&mem->process_info->lock);

	/* Lock mmap-sem. If we find an invalid userptr BO, we can be
	 * sure that the MMU notifier is no longer running
	 * concurrently and the queues are actually stopped
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		mmap_write_lock(current->mm);
		is_invalid_userptr = atomic_read(&mem->invalid);
		mmap_write_unlock(current->mm);
	}

	mutex_lock(&mem->lock);

	domain = mem->domain;
	bo_size = bo->tbo.base.size;

	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
			mem->va,
			mem->va + bo_size * (1 + mem->aql_queue),
			vm, domain_string(domain));

	ret = reserve_bo_and_vm(mem, vm, &ctx);
	if (unlikely(ret))
		goto out;

	/* Userptr can be marked as "not invalid", but not actually be
	 * validated yet (still in the system domain). In that case
	 * the queues are still stopped and we can leave mapping for
	 * the next restore worker
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
	    bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
		is_invalid_userptr = true;

	if (check_if_add_bo_to_vm(avm, mem)) {
		ret = add_bo_to_vm(adev, mem, avm, false,
				&bo_va_entry);
		if (ret)
			goto add_bo_to_vm_failed;
		if (mem->aql_queue) {
			ret = add_bo_to_vm(adev, mem, avm,
					true, &bo_va_entry_aql);
			if (ret)
				goto add_bo_to_vm_failed_aql;
		}
	} else {
		ret = vm_validate_pt_pd_bos(avm);
		if (unlikely(ret))
			goto add_bo_to_vm_failed;
	}

	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		/* Validate BO only once. The eviction fence gets added to BO
		 * the first time it is mapped. Validate will wait for all
		 * background evictions to complete.
		 */
		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
		if (ret) {
			pr_debug("Validate failed\n");
			goto map_bo_to_gpuvm_failed;
		}
	}

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
			pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
					entry->va, entry->va + bo_size,
					entry);

			ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
					      is_invalid_userptr);
			if (ret) {
				pr_err("Failed to map bo to gpuvm\n");
				goto map_bo_to_gpuvm_failed;
			}

			ret = vm_update_pds(vm, ctx.sync);
			if (ret) {
				pr_err("Failed to update page directories\n");
				goto map_bo_to_gpuvm_failed;
			}

			entry->is_mapped = true;
			mem->mapped_to_gpu_memory++;
			pr_debug("\t INC mapping count %d\n",
					mem->mapped_to_gpu_memory);
		}
	}

	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
		amdgpu_bo_fence(bo,
				&avm->process_info->eviction_fence->base,
				true);
	ret = unreserve_bo_and_vms(&ctx, false, false);

	goto out;

map_bo_to_gpuvm_failed:
	if (bo_va_entry_aql)
		remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
add_bo_to_vm_failed_aql:
	if (bo_va_entry)
		remove_bo_from_vm(adev, bo_va_entry, bo_size);
add_bo_to_vm_failed:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->process_info->lock);
	mutex_unlock(&mem->lock);
	return ret;
}
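
/*
 * When is_invalid_userptr is true above, map_bo_to_gpuvm() records the
 * VA mapping but skips the PTE update (no_update_pte); the queues are
 * stopped at that point, and the restore worker fills in the PTEs once
 * valid pages are available again.
 */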

int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdkfd_process_info *process_info =
		((struct amdgpu_vm *)vm)->process_info;
	unsigned long bo_size = mem->bo->tbo.base.size;
	struct kfd_bo_va_list *entry;
	struct bo_vm_reservation_context ctx;
	int ret;

	mutex_lock(&mem->lock);

	ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
	if (unlikely(ret))
		goto out;
	/* If no VMs were reserved, it means the BO wasn't actually mapped */
	if (ctx.n_vms == 0) {
		ret = -EINVAL;
		goto unreserve_out;
	}

	ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
	if (unlikely(ret))
		goto unreserve_out;

	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
		mem->va,
		mem->va + bo_size * (1 + mem->aql_queue),
		vm);

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if (entry->bo_va->base.vm == vm && entry->is_mapped) {
			pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
					entry->va,
					entry->va + bo_size,
					entry);

			ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
			if (ret == 0) {
				entry->is_mapped = false;
			} else {
				pr_err("failed to unmap VA 0x%llx\n",
						mem->va);
				goto unreserve_out;
			}

			mem->mapped_to_gpu_memory--;
			pr_debug("\t DEC mapping count %d\n",
					mem->mapped_to_gpu_memory);
		}
	}

	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
	 * required.
	 */
	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
	    !mem->bo->tbo.pin_count)
		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
						process_info->eviction_fence);

unreserve_out:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->lock);
	return ret;
}

int amdgpu_amdkfd_gpuvm_sync_memory(
		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
{
	struct amdgpu_sync sync;
	int ret;

	amdgpu_sync_create(&sync);

	mutex_lock(&mem->lock);
	amdgpu_sync_clone(&mem->sync, &sync);
	mutex_unlock(&mem->lock);

	ret = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return ret;
}

int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
		struct kgd_mem *mem, void **kptr, uint64_t *size)
{
	int ret;
	struct amdgpu_bo *bo = mem->bo;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		pr_err("userptr can't be mapped to kernel\n");
		return -EINVAL;
	}

	/* delete kgd_mem from kfd_bo_list to avoid re-validating
	 * this BO in BO's restoring after eviction.
	 */
	mutex_lock(&mem->process_info->lock);

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("Failed to reserve bo. ret %d\n", ret);
		goto bo_reserve_failed;
	}

	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (ret) {
		pr_err("Failed to pin bo. ret %d\n", ret);
		goto pin_failed;
	}

	ret = amdgpu_bo_kmap(bo, kptr);
	if (ret) {
		pr_err("Failed to map bo to kernel. ret %d\n", ret);
		goto kmap_failed;
	}

	amdgpu_amdkfd_remove_eviction_fence(
		bo, mem->process_info->eviction_fence);
	list_del_init(&mem->validate_list.head);

	if (size)
		*size = amdgpu_bo_size(bo);

	amdgpu_bo_unreserve(bo);

	mutex_unlock(&mem->process_info->lock);
	return 0;

kmap_failed:
	amdgpu_bo_unpin(bo);
pin_failed:
	amdgpu_bo_unreserve(bo);
bo_reserve_failed:
	mutex_unlock(&mem->process_info->lock);

	return ret;
}

int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
					      struct kfd_vm_fault_info *mem)
{
	struct amdgpu_device *adev;

	adev = (struct amdgpu_device *)kgd;
	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
		*mem = *adev->gmc.vm_fault_info;
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
	}
	return 0;
}

int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
				      struct dma_buf *dma_buf,
				      uint64_t va, void *vm,
				      struct kgd_mem **mem, uint64_t *size,
				      uint64_t *mmap_offset)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		return -EINVAL;

	obj = dma_buf->priv;
	if (drm_to_adev(obj->dev) != adev)
		/* Can't handle buffers from other devices */
		return -EINVAL;

	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

	if (size)
		*size = amdgpu_bo_size(bo);

	if (mmap_offset)
		*mmap_offset = amdgpu_bo_mmap_offset(bo);

	INIT_LIST_HEAD(&(*mem)->bo_va_list);
	mutex_init(&(*mem)->lock);

	(*mem)->alloc_flags =
		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
		| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
		| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;

	drm_gem_object_get(&bo->tbo.base);
	(*mem)->bo = bo;
	(*mem)->va = va;
	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
	amdgpu_sync_create(&(*mem)->sync);
	(*mem)->is_imported = true;

	return 0;
}
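
/*
 * The is_imported flag set above feeds back into
 * amdgpu_amdkfd_gpuvm_free_memory_of_gpu(), which reports a freed size
 * of 0 for imported BOs so the importing process is not credited for
 * VRAM it never allocated.
 */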

/* Evict a userptr BO by stopping the queues if necessary
 *
 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
 * cannot do any memory allocations, and cannot take any locks that
 * are held elsewhere while allocating memory. Therefore this is as
 * simple as possible, using atomic counters.
 *
 * It doesn't do anything to the BO itself. The real work happens in
 * restore, where we get updated page addresses. This function only
 * ensures that GPU access to the BO is stopped.
 */
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
				struct mm_struct *mm)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	int evicted_bos;
	int r = 0;

	atomic_inc(&mem->invalid);
	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
	if (evicted_bos == 1) {
		/* First eviction, stop the queues */
		r = kgd2kfd_quiesce_mm(mm);
		if (r)
			pr_err("Failed to quiesce KFD\n");
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
	}

	return r;
}
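
/*
 * Example of the counter logic: if two userptr BOs are invalidated in
 * quick succession, evicted_bos goes 0 -> 1 -> 2 but only the first
 * increment quiesces the queues; the restore worker later resets the
 * counter to 0 with cmpxchg, so any eviction racing past that point
 * counts as a "first" eviction again.
 */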

/* Update invalid userptr BOs
 *
 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
 * userptr_inval_list and updates user pages for all BOs that have
 * been invalidated since their last update.
 */
static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
				     struct mm_struct *mm)
{
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int invalid, ret;

	/* Move all invalidated BOs to the userptr_inval_list and
	 * release their user pages by migration to the CPU domain
	 */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_valid_list,
				 validate_list.head) {
		if (!atomic_read(&mem->invalid))
			continue; /* BO is still valid */

		bo = mem->bo;

		if (amdgpu_bo_reserve(bo, true))
			return -EAGAIN;
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (ret) {
			pr_err("%s: Failed to invalidate userptr BO\n",
			       __func__);
			return -EAGAIN;
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_inval_list);
	}

	if (list_empty(&process_info->userptr_inval_list))
		return 0; /* All evicted userptr BOs were freed */

	/* Go through userptr_inval_list and update any invalid user_pages */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		invalid = atomic_read(&mem->invalid);
		if (!invalid)
			/* BO hasn't been invalidated since the last
			 * revalidation attempt. Keep its BO list.
			 */
			continue;

		bo = mem->bo;

		/* Get updated user pages */
		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
		if (ret) {
			pr_debug("%s: Failed to get user pages: %d\n",
				__func__, ret);

			/* Return error -EBUSY or -ENOMEM, retry restore */
			return ret;
		}

		/*
		 * FIXME: Cannot ignore the return code, must hold
		 * notifier_lock
		 */
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

		/* Mark the BO as valid unless it was invalidated
		 * again concurrently.
		 */
		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
			return -EAGAIN;
	}

	return 0;
}
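
/*
 * Any non-zero return from update_invalid_user_pages() (including the
 * -EAGAIN cases above) aborts the current restore attempt; the worker
 * leaves evicted_bos non-zero and reschedules itself, so the update is
 * simply retried on the next run.
 */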

/* Validate invalid userptr BOs
 *
 * Validates BOs on the userptr_inval_list, and moves them back to the
 * userptr_valid_list. Also updates GPUVM page tables with new page
 * addresses and waits for the page table updates to complete.
 */
static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
{
	struct amdgpu_bo_list_entry *pd_bo_list_entries;
	struct list_head resv_list, duplicates;
	struct ww_acquire_ctx ticket;
	struct amdgpu_sync sync;

	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int i, ret;

	pd_bo_list_entries = kcalloc(process_info->n_vms,
				     sizeof(struct amdgpu_bo_list_entry),
				     GFP_KERNEL);
	if (!pd_bo_list_entries) {
		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
		ret = -ENOMEM;
		goto out_no_mem;
	}

	INIT_LIST_HEAD(&resv_list);
	INIT_LIST_HEAD(&duplicates);

	/* Get all the page directory BOs that need to be reserved */
	i = 0;
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
				    &pd_bo_list_entries[i++]);
	/* Add the userptr_inval_list entries to resv_list */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		list_add_tail(&mem->resv_list.head, &resv_list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	/* Reserve all BOs and page tables for validation */
	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
	WARN(!list_empty(&duplicates), "Duplicates should be empty");
	if (ret)
		goto out_free;

	amdgpu_sync_create(&sync);

	ret = process_validate_vms(process_info);
	if (ret)
		goto unreserve_out;

	/* Validate BOs and update GPUVM page tables */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_inval_list,
				 validate_list.head) {
		struct kfd_bo_va_list *bo_va_entry;

		bo = mem->bo;

		/* Validate the BO if we got user pages */
		if (bo->tbo.ttm->pages[0]) {
			amdgpu_bo_placement_from_domain(bo, mem->domain);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (ret) {
				pr_err("%s: failed to validate BO\n", __func__);
				goto unreserve_out;
			}
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_valid_list);

		/* Update mapping. If the BO was not validated
		 * (because we couldn't get user pages), this will
		 * clear the page table entries, which will result in
		 * VM faults if the GPU tries to access the invalid
		 * memory.
		 */
		list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
			if (!bo_va_entry->is_mapped)
				continue;

			ret = update_gpuvm_pte((struct amdgpu_device *)
					       bo_va_entry->kgd_dev,
					       bo_va_entry, &sync);
			if (ret) {
				pr_err("%s: update PTE failed\n", __func__);
				/* make sure this gets validated again */
				atomic_inc(&mem->invalid);
				goto unreserve_out;
			}
		}
	}

	/* Update page directories */
	ret = process_update_pds(process_info, &sync);

unreserve_out:
	ttm_eu_backoff_reservation(&ticket, &resv_list);
	amdgpu_sync_wait(&sync, false);
	amdgpu_sync_free(&sync);
out_free:
	kfree(pd_bo_list_entries);
out_no_mem:

	return ret;
}
/* Worker callback to restore evicted userptr BOs
 *
 * Tries to update and validate all userptr BOs. If successful and no
 * concurrent evictions happened, the queues are restarted. Otherwise,
 * reschedule for another attempt later.
 */
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct amdkfd_process_info *process_info =
		container_of(dwork, struct amdkfd_process_info,
			     restore_userptr_work);
	struct task_struct *usertask;
	struct mm_struct *mm;
	int evicted_bos;

	evicted_bos = atomic_read(&process_info->evicted_bos);
	if (!evicted_bos)
		return;

	/* Reference task and mm in case of concurrent process termination */
	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
	if (!usertask)
		return;
	mm = get_task_mm(usertask);
	if (!mm) {
		put_task_struct(usertask);
		return;
	}

	mutex_lock(&process_info->lock);

	if (update_invalid_user_pages(process_info, mm))
		goto unlock_out;
	/* userptr_inval_list can be empty if all evicted userptr BOs
	 * have been freed. In that case there is nothing to validate
	 * and we can just restart the queues.
	 */
	if (!list_empty(&process_info->userptr_inval_list)) {
		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
			goto unlock_out; /* Concurrent eviction, try again */

		if (validate_invalid_user_pages(process_info))
			goto unlock_out;
	}
	/* Final check for concurrent eviction and atomic update. If
	 * another eviction happens after a successful update, it will
	 * be treated as a first eviction that calls quiesce_mm. The
	 * eviction reference counting inside KFD will handle this case.
	 */
	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
	    evicted_bos)
		goto unlock_out;
	evicted_bos = 0;
	if (kgd2kfd_resume_mm(mm)) {
		pr_err("%s: Failed to resume KFD\n", __func__);
		/* No recovery from this failure. Probably the CP is
		 * hanging. No point trying again.
		 */
	}

unlock_out:
	mutex_unlock(&process_info->lock);
	mmput(mm);
	put_task_struct(usertask);

	/* If validation failed, reschedule another attempt */
	if (evicted_bos)
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
}
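/*
 * Standalone model (hypothetical, for illustration) of the counter
 * handshake used by the worker above: a restore only counts as final if
 * no new eviction bumped evicted_bos since the initial snapshot, which
 * is exactly what the compare-and-exchange checks.
 */
#if 0	/* example only, not compiled */
static bool example_try_finish_restore(atomic_t *evicted_bos, int snapshot)
{
	/* Reset to 0 only if the count is unchanged; otherwise the
	 * caller must revalidate and try again
	 */
	return atomic_cmpxchg(evicted_bos, snapshot, 0) == snapshot;
}
#endif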
/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
 *   KFD process identified by process_info
 *
 * @process_info: amdkfd_process_info of the KFD process
 *
 * After memory eviction, the restore thread calls this function. The
 * function should be called when the process is still valid. BO restore
 * involves:
 *
 * 1.  Release old eviction fence and create new one
 * 2.  Get two copies of PD BO list from all the VMs. Keep one copy as pd_list.
 * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
 *     BOs that need to be reserved.
 * 4.  Reserve all the BOs
 * 5.  Validate PD and PT BOs.
 * 6.  Validate all KFD BOs using kfd_bo_list, map them and add a new fence
 * 7.  Add fence to all PD and PT BOs.
 * 8.  Unreserve all BOs
 */
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
{
	struct amdgpu_bo_list_entry *pd_bo_list;
	struct amdkfd_process_info *process_info = info;
	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem;
	struct bo_vm_reservation_context ctx;
	struct amdgpu_amdkfd_fence *new_fence;
	int ret = 0, i;
	struct list_head duplicate_save;
	struct amdgpu_sync sync_obj;
	unsigned long failed_size = 0;
	unsigned long total_size = 0;

	INIT_LIST_HEAD(&duplicate_save);
	INIT_LIST_HEAD(&ctx.list);
	INIT_LIST_HEAD(&ctx.duplicates);

	pd_bo_list = kcalloc(process_info->n_vms,
			     sizeof(struct amdgpu_bo_list_entry),
			     GFP_KERNEL);
	if (!pd_bo_list)
		return -ENOMEM;

	i = 0;
	mutex_lock(&process_info->lock);
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);

	/* Reserve all BOs and page tables/directory. Add all BOs from
	 * kfd_bo_list to ctx.list
	 */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		list_add_tail(&mem->resv_list.head, &ctx.list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
				     false, &duplicate_save);
	if (ret) {
		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
		goto ttm_reserve_fail;
	}

	amdgpu_sync_create(&sync_obj);

	/* Validate PDs and PTs */
	ret = process_validate_vms(process_info);
	if (ret)
		goto validate_map_fail;

	ret = process_sync_pds_resv(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
		goto validate_map_fail;
	}

	/* Validate BOs and map them to GPUVM (update VM page tables). */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		struct amdgpu_bo *bo = mem->bo;
		uint32_t domain = mem->domain;
		struct kfd_bo_va_list *bo_va_entry;

		total_size += amdgpu_bo_size(bo);

		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
		if (ret) {
			pr_debug("Memory eviction: Validate BOs failed\n");
			failed_size += amdgpu_bo_size(bo);
			/* Fall back to GTT if validation in the preferred
			 * domain failed
			 */
			ret = amdgpu_amdkfd_bo_validate(bo,
					AMDGPU_GEM_DOMAIN_GTT, false);
			if (ret) {
				pr_debug("Memory eviction: Try again\n");
				goto validate_map_fail;
			}
		}
		ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
		if (ret) {
			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
			goto validate_map_fail;
		}
		list_for_each_entry(bo_va_entry, &mem->bo_va_list,
				    bo_list) {
			ret = update_gpuvm_pte((struct amdgpu_device *)
					       bo_va_entry->kgd_dev,
					       bo_va_entry,
					       &sync_obj);
			if (ret) {
				pr_debug("Memory eviction: update PTE failed. Try again\n");
				goto validate_map_fail;
			}
		}
	}

	if (failed_size)
		pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);

	/* Update page directories */
	ret = process_update_pds(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: update PDs failed. Try again\n");
		goto validate_map_fail;
	}

	/* Wait for validate and PT updates to finish */
	amdgpu_sync_wait(&sync_obj, false);

	/* Release the old eviction fence and create a new one, because a
	 * fence only goes from unsignaled to signaled once and cannot be
	 * reused. Use context and mm from the old fence.
	 */
	new_fence = amdgpu_amdkfd_fence_create(
				process_info->eviction_fence->base.context,
				process_info->eviction_fence->mm);
	if (!new_fence) {
		pr_err("Failed to create eviction fence\n");
		ret = -ENOMEM;
		goto validate_map_fail;
	}
	dma_fence_put(&process_info->eviction_fence->base);
	process_info->eviction_fence = new_fence;
	*ef = dma_fence_get(&new_fence->base);

	/* Attach new eviction fence to all BOs */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head)
		amdgpu_bo_fence(mem->bo,
				&process_info->eviction_fence->base, true);

	/* Attach eviction fence to PD / PT BOs */
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *bo = peer_vm->root.base.bo;

		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
	}

validate_map_fail:
	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
	amdgpu_sync_free(&sync_obj);
ttm_reserve_fail:
	mutex_unlock(&process_info->lock);
	kfree(pd_bo_list);
	return ret;
}
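/*
 * Hypothetical caller-side sketch: KFD's restore path hands in the opaque
 * process_info pointer and receives a reference to the new eviction fence
 * through @ef on success (the function above takes dma_fence_get() on the
 * caller's behalf). Error handling is reduced to the bare minimum here.
 */
#if 0	/* example only, not compiled */
static int example_restore_process(void *process_info)
{
	struct dma_fence *ef = NULL;
	int ret;

	ret = amdgpu_amdkfd_gpuvm_restore_process_bos(process_info, &ef);
	if (ret)
		return ret;	/* e.g. -ENOMEM, or reservation failed */

	/* ... store ef as the process eviction fence ... */

	dma_fence_put(ef);	/* drop the reference once it is stored or unused */
	return 0;
}
#endif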
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
{
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
	int ret;

	if (!info || !gws)
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

	mutex_init(&(*mem)->lock);
	INIT_LIST_HEAD(&(*mem)->bo_va_list);
	(*mem)->bo = amdgpu_bo_ref(gws_bo);
	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
	(*mem)->process_info = process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
	amdgpu_sync_create(&(*mem)->sync);

	/* Validate gws bo the first time it is added to process */
	mutex_lock(&(*mem)->process_info->lock);
	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		goto bo_reservation_failure;
	}

	ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
	if (ret) {
		pr_err("GWS BO validate failed %d\n", ret);
		goto bo_validation_failure;
	}
	/* The GWS resource is shared between amdgpu and amdkfd.
	 * Add the process eviction fence to the BO so they can
	 * evict each other.
	 */
	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(gws_bo);
	mutex_unlock(&(*mem)->process_info->lock);

	return ret;

reserve_shared_fail:
bo_validation_failure:
	amdgpu_bo_unreserve(gws_bo);
bo_reservation_failure:
	mutex_unlock(&(*mem)->process_info->lock);
	amdgpu_sync_free(&(*mem)->sync);
	remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
	*mem = NULL;
	return ret;
}
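/*
 * Hypothetical usage sketch showing that GWS attach/detach is symmetric:
 * @gws would be the amdgpu GWS BO shared with the graphics side (where
 * that pointer comes from is outside this file), and the kgd_mem returned
 * by add must be handed back to remove.
 */
#if 0	/* example only, not compiled */
static int example_use_gws(void *process_info, void *gws)
{
	struct kgd_mem *gws_mem;
	int ret;

	ret = amdgpu_amdkfd_add_gws_to_process(process_info, gws, &gws_mem);
	if (ret)
		return ret;

	/* ... user queues may now allocate GWS entries ... */

	return amdgpu_amdkfd_remove_gws_from_process(process_info, gws_mem);
}
#endif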
int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
{
	int ret;
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
	struct amdgpu_bo *gws_bo = kgd_mem->bo;

	/* Remove BO from process's validate list so restore worker won't touch
	 * it anymore
	 */
	remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);

	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		/* TODO: add BO back to validate_list? */
		return ret;
	}
	amdgpu_amdkfd_remove_eviction_fence(gws_bo,
			process_info->eviction_fence);
	amdgpu_bo_unreserve(gws_bo);
	amdgpu_sync_free(&kgd_mem->sync);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&kgd_mem->lock);
	kfree(mem);
	return 0;
}

/* Returns GPU-specific tiling mode information */
int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
				  struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	/* Those values are not set from GFX9 onwards */
	config->num_banks = adev->gfx.config.num_banks;
	config->num_ranks = adev->gfx.config.num_ranks;

	return 0;
}
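/*
 * Minimal sketch (hypothetical) of a consumer of the tiling information
 * above, e.g. for forwarding to the KFD ioctl layer. Only fields populated
 * by amdgpu_amdkfd_get_tile_config() are touched.
 */
#if 0	/* example only, not compiled */
static void example_dump_tile_config(struct kgd_dev *kgd)
{
	struct tile_config config;

	if (amdgpu_amdkfd_get_tile_config(kgd, &config))
		return;

	pr_debug("gb_addr_config 0x%08x, %u tile modes, %u macro tile modes\n",
		 config.gb_addr_config, config.num_tile_configs,
		 config.num_macro_tile_configs);
}
#endif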