Lines matching refs: vm — amdgpu GPUVM cross-reference. Each entry gives the source line number, the code fragment, and the enclosing function, with the role of vm (member, argument, or local) noted.

122 	struct amdgpu_vm *vm;  member
141 int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_set_pasid() argument
146 if (vm->pasid == pasid) in amdgpu_vm_set_pasid()
149 if (vm->pasid) { in amdgpu_vm_set_pasid()
150 r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid)); in amdgpu_vm_set_pasid()
154 vm->pasid = 0; in amdgpu_vm_set_pasid()
158 r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, in amdgpu_vm_set_pasid()
163 vm->pasid = pasid; in amdgpu_vm_set_pasid()
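
Read together, the amdgpu_vm_set_pasid() fragments above reconstruct to roughly the shape below. This is a sketch, not the verbatim kernel function: the surrounding locking and some error handling are elided, and the GFP flag is an assumption. The pattern is a two-step update of the pasid-to-VM reverse map kept in an xarray — erase any stale binding before storing the new one, converting xarray-encoded errors with xa_err().

	int r;

	if (vm->pasid == pasid)
		return 0;			/* already bound */

	if (vm->pasid) {			/* drop the stale reverse mapping */
		r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
		if (r < 0)
			return r;
		vm->pasid = 0;
	}

	if (pasid) {				/* install the new binding */
		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
					GFP_KERNEL));
		if (r < 0)
			return r;
		vm->pasid = pasid;
	}

	return 0;
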
180 struct amdgpu_vm *vm = vm_bo->vm; in amdgpu_vm_bo_evicted() local
184 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted()
186 list_move(&vm_bo->vm_status, &vm->evicted); in amdgpu_vm_bo_evicted()
188 list_move_tail(&vm_bo->vm_status, &vm->evicted); in amdgpu_vm_bo_evicted()
189 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted()
201 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_moved()
202 list_move(&vm_bo->vm_status, &vm_bo->vm->moved); in amdgpu_vm_bo_moved()
203 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_moved()
216 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_idle()
217 list_move(&vm_bo->vm_status, &vm_bo->vm->idle); in amdgpu_vm_bo_idle()
218 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_idle()
232 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_invalidated()
233 list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated); in amdgpu_vm_bo_invalidated()
234 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_invalidated()
248 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted_user()
249 list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user); in amdgpu_vm_bo_evicted_user()
250 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted_user()
264 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_relocated()
265 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); in amdgpu_vm_bo_relocated()
266 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_relocated()
282 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_done()
283 list_move(&vm_bo->vm_status, &vm_bo->vm->done); in amdgpu_vm_bo_done()
284 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_done()
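
Lines 180 through 284 are seven near-identical state helpers (evicted, moved, idle, invalidated, evicted_user, relocated, done): each state is a list head in struct amdgpu_vm, and every transition is a list_move() of the BO's vm_status node under vm->status_lock. Only amdgpu_vm_bo_evicted() differs, inserting page-table BOs at the head and user BOs at the tail of the evicted list. A hypothetical common helper (not in the source, shown only to make the shared pattern explicit):

	static void example_vm_bo_set_state(struct amdgpu_vm_bo_base *vm_bo,
					    struct list_head *new_state)
	{
		spin_lock(&vm_bo->vm->status_lock);
		list_move(&vm_bo->vm_status, new_state);
		spin_unlock(&vm_bo->vm->status_lock);
	}
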
294 static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm) in amdgpu_vm_bo_reset_state_machine() argument
298 spin_lock(&vm->status_lock); in amdgpu_vm_bo_reset_state_machine()
299 list_splice_init(&vm->done, &vm->invalidated); in amdgpu_vm_bo_reset_state_machine()
300 list_for_each_entry(vm_bo, &vm->invalidated, vm_status) in amdgpu_vm_bo_reset_state_machine()
302 list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) { in amdgpu_vm_bo_reset_state_machine()
307 list_move(&vm_bo->vm_status, &vm_bo->vm->moved); in amdgpu_vm_bo_reset_state_machine()
309 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); in amdgpu_vm_bo_reset_state_machine()
311 spin_unlock(&vm->status_lock); in amdgpu_vm_bo_reset_state_machine()
324 struct amdgpu_vm *vm = base->vm; in amdgpu_vm_update_shared() local
330 spin_lock(&vm->status_lock); in amdgpu_vm_update_shared()
335 vm->stats[bo_memtype].drm.shared += size; in amdgpu_vm_update_shared()
336 vm->stats[bo_memtype].drm.private -= size; in amdgpu_vm_update_shared()
338 vm->stats[bo_memtype].drm.shared -= size; in amdgpu_vm_update_shared()
339 vm->stats[bo_memtype].drm.private += size; in amdgpu_vm_update_shared()
342 spin_unlock(&vm->status_lock); in amdgpu_vm_update_shared()
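
The amdgpu_vm_update_shared() fragments (lines 335-339) pair up into an if/else that moves the BO's size between the shared and private buckets of the per-placement DRM memory stats when its shared state flips, all under status_lock. A reconstruction, where "shared" (the BO's new state) and "size" are inferred from context:

	if (shared) {				/* became shared */
		vm->stats[bo_memtype].drm.shared += size;
		vm->stats[bo_memtype].drm.private -= size;
	} else {				/* became private */
		vm->stats[bo_memtype].drm.shared -= size;
		vm->stats[bo_memtype].drm.private += size;
	}
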
373 struct amdgpu_vm *vm = base->vm; in amdgpu_vm_update_stats_locked() local
382 vm->stats[bo_memtype].drm.shared += size; in amdgpu_vm_update_stats_locked()
384 vm->stats[bo_memtype].drm.private += size; in amdgpu_vm_update_stats_locked()
389 vm->stats[res_memtype].drm.resident += size; in amdgpu_vm_update_stats_locked()
394 vm->stats[res_memtype].drm.purgeable += size; in amdgpu_vm_update_stats_locked()
396 vm->stats[bo_memtype].evicted += size; in amdgpu_vm_update_stats_locked()
412 struct amdgpu_vm *vm = base->vm; in amdgpu_vm_update_stats() local
414 spin_lock(&vm->status_lock); in amdgpu_vm_update_stats()
416 spin_unlock(&vm->status_lock); in amdgpu_vm_update_stats()
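
amdgpu_vm_update_stats_locked() (lines 373-396) splits the accounting three ways: shared vs. private by the BO's preferred placement, resident (plus purgeable, when the BO is discardable) by where it actually resides, and evicted otherwise. amdgpu_vm_update_stats() (lines 412-416) is merely the status_lock wrapper. A hedged reconstruction of the branch structure — "shared", "res", "res_memtype" and "discardable" are stand-ins inferred from the fragments:

	if (shared)
		vm->stats[bo_memtype].drm.shared += size;
	else
		vm->stats[bo_memtype].drm.private += size;

	if (res) {				/* BO is resident somewhere */
		vm->stats[res_memtype].drm.resident += size;
		if (discardable)		/* purgeable only while resident */
			vm->stats[res_memtype].drm.purgeable += size;
	} else {				/* not resident: count as evicted */
		vm->stats[bo_memtype].evicted += size;
	}
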
430 struct amdgpu_vm *vm, struct amdgpu_bo *bo) in amdgpu_vm_bo_base_init() argument
432 base->vm = vm; in amdgpu_vm_bo_base_init()
442 spin_lock(&vm->status_lock); in amdgpu_vm_bo_base_init()
445 spin_unlock(&vm->status_lock); in amdgpu_vm_bo_base_init()
447 if (!amdgpu_vm_is_bo_always_valid(vm, bo)) in amdgpu_vm_bo_base_init()
450 dma_resv_assert_held(vm->root.bo->tbo.base.resv); in amdgpu_vm_bo_base_init()
452 ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move); in amdgpu_vm_bo_base_init()
479 int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec, in amdgpu_vm_lock_pd() argument
483 return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base, in amdgpu_vm_lock_pd()
497 struct amdgpu_vm *vm) in amdgpu_vm_move_to_lru_tail() argument
500 ttm_lru_bulk_move_tail(&vm->lru_bulk_move); in amdgpu_vm_move_to_lru_tail()
506 struct amdgpu_vm *vm) in amdgpu_vm_init_entities() argument
510 r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_vm_init_entities()
516 return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_vm_init_entities()
521 drm_sched_entity_destroy(&vm->immediate); in amdgpu_vm_init_entities()
526 static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm) in amdgpu_vm_fini_entities() argument
528 drm_sched_entity_destroy(&vm->immediate); in amdgpu_vm_fini_entities()
529 drm_sched_entity_destroy(&vm->delayed); in amdgpu_vm_fini_entities()
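
The entity fragments (lines 506-529) show the usual paired setup: each VM owns an "immediate" and a "delayed" DRM scheduler entity for page-table updates, and a failed second init unwinds the first. A sketch; the scheduler-list arguments (vm_pte_scheds / vm_pte_num_scheds) are assumptions, as the fragments do not show them:

	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
				  adev->vm_manager.vm_pte_scheds,	/* assumed */
				  adev->vm_manager.vm_pte_num_scheds, NULL);
	if (r)
		return r;

	r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
				  adev->vm_manager.vm_pte_scheds,
				  adev->vm_manager.vm_pte_num_scheds, NULL);
	if (r)
		drm_sched_entity_destroy(&vm->immediate);	/* unwind */
	return r;
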
541 uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_generation() argument
545 if (!vm) in amdgpu_vm_generation()
548 result += lower_32_bits(vm->generation); in amdgpu_vm_generation()
550 if (drm_sched_entity_error(&vm->delayed)) in amdgpu_vm_generation()
572 int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_validate() argument
577 uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm); in amdgpu_vm_validate()
582 if (vm->generation != new_vm_generation) { in amdgpu_vm_validate()
583 vm->generation = new_vm_generation; in amdgpu_vm_validate()
584 amdgpu_vm_bo_reset_state_machine(vm); in amdgpu_vm_validate()
585 amdgpu_vm_fini_entities(vm); in amdgpu_vm_validate()
586 r = amdgpu_vm_init_entities(adev, vm); in amdgpu_vm_validate()
591 spin_lock(&vm->status_lock); in amdgpu_vm_validate()
592 while (!list_empty(&vm->evicted)) { in amdgpu_vm_validate()
593 bo_base = list_first_entry(&vm->evicted, in amdgpu_vm_validate()
596 spin_unlock(&vm->status_lock); in amdgpu_vm_validate()
607 vm->update_funcs->map_table(to_amdgpu_bo_vm(bo)); in amdgpu_vm_validate()
610 spin_lock(&vm->status_lock); in amdgpu_vm_validate()
612 while (ticket && !list_empty(&vm->evicted_user)) { in amdgpu_vm_validate()
613 bo_base = list_first_entry(&vm->evicted_user, in amdgpu_vm_validate()
616 spin_unlock(&vm->status_lock); in amdgpu_vm_validate()
621 struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm); in amdgpu_vm_validate()
638 spin_lock(&vm->status_lock); in amdgpu_vm_validate()
640 spin_unlock(&vm->status_lock); in amdgpu_vm_validate()
642 amdgpu_vm_eviction_lock(vm); in amdgpu_vm_validate()
643 vm->evicting = false; in amdgpu_vm_validate()
644 amdgpu_vm_eviction_unlock(vm); in amdgpu_vm_validate()
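
amdgpu_vm_validate() (lines 572-644) drains vm->evicted — and, when a ticket is supplied, vm->evicted_user — with a drop-the-lock loop: status_lock protects only list membership, so the first entry is taken under the lock, the lock is released for the potentially sleeping validation, then re-acquired. The same pattern recurs for vm->freed in amdgpu_vm_clear_freed() and for vm->moved/vm->invalidated in amdgpu_vm_handle_moved() further down. In isolation:

	struct amdgpu_vm_bo_base *bo_base;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->evicted)) {
		bo_base = list_first_entry(&vm->evicted,
					   struct amdgpu_vm_bo_base,
					   vm_status);
		spin_unlock(&vm->status_lock);

		/* Validate bo_base->bo here; may sleep. Successful
		 * validation must move bo_base off vm->evicted (e.g. to
		 * vm->moved or vm->relocated), or this would never end. */

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);
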
659 bool amdgpu_vm_ready(struct amdgpu_vm *vm) in amdgpu_vm_ready() argument
664 amdgpu_vm_eviction_lock(vm); in amdgpu_vm_ready()
665 ret = !vm->evicting; in amdgpu_vm_ready()
666 amdgpu_vm_eviction_unlock(vm); in amdgpu_vm_ready()
668 spin_lock(&vm->status_lock); in amdgpu_vm_ready()
669 empty = list_empty(&vm->evicted); in amdgpu_vm_ready()
670 spin_unlock(&vm->status_lock); in amdgpu_vm_ready()
891 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, in amdgpu_vm_bo_find() argument
897 if (base->vm != vm) in amdgpu_vm_bo_find()
945 struct amdgpu_vm *vm, bool immediate) in amdgpu_vm_update_pdes() argument
953 spin_lock(&vm->status_lock); in amdgpu_vm_update_pdes()
954 list_splice_init(&vm->relocated, &relocated); in amdgpu_vm_update_pdes()
955 spin_unlock(&vm->status_lock); in amdgpu_vm_update_pdes()
965 params.vm = vm; in amdgpu_vm_update_pdes()
968 r = vm->update_funcs->prepare(&params, NULL); in amdgpu_vm_update_pdes()
981 r = vm->update_funcs->commit(&params, &vm->last_update); in amdgpu_vm_update_pdes()
986 atomic64_inc(&vm->tlb_seq); in amdgpu_vm_update_pdes()
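
amdgpu_vm_update_pdes() (lines 945-986) uses the complementary batching idiom: rather than popping entries one at a time, it splices the entire relocated list onto a local head under status_lock and processes it unlocked, bumping vm->tlb_seq once the commit succeeds. Condensed:

	LIST_HEAD(relocated);

	spin_lock(&vm->status_lock);
	list_splice_init(&vm->relocated, &relocated);	/* take everything */
	spin_unlock(&vm->status_lock);

	/* ... vm->update_funcs->prepare() / ->commit() on the local list ... */

	atomic64_inc(&vm->tlb_seq);			/* translations changed */
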
1012 atomic64_inc(&tlb_cb->vm->tlb_seq); in amdgpu_vm_tlb_seq_cb()
1030 struct amdgpu_vm *vm = params->vm; in amdgpu_vm_tlb_flush() local
1032 tlb_cb->vm = vm; in amdgpu_vm_tlb_flush()
1040 dma_fence_put(vm->last_tlb_flush); in amdgpu_vm_tlb_flush()
1041 vm->last_tlb_flush = dma_fence_get(*fence); in amdgpu_vm_tlb_flush()
1047 if (!params->unlocked && vm->is_compute_context) { in amdgpu_vm_tlb_flush()
1048 amdgpu_vm_tlb_fence_create(params->adev, vm, fence); in amdgpu_vm_tlb_flush()
1051 dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence, in amdgpu_vm_tlb_flush()
1080 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_update_range() argument
1115 params.vm = vm; in amdgpu_vm_update_range()
1123 amdgpu_vm_eviction_lock(vm); in amdgpu_vm_update_range()
1124 if (vm->evicting) { in amdgpu_vm_update_range()
1129 if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) { in amdgpu_vm_update_range()
1132 amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true); in amdgpu_vm_update_range()
1133 swap(vm->last_unlocked, tmp); in amdgpu_vm_update_range()
1137 r = vm->update_funcs->prepare(&params, sync); in amdgpu_vm_update_range()
1195 r = vm->update_funcs->commit(&params, fence); in amdgpu_vm_update_range()
1208 amdgpu_vm_eviction_unlock(vm); in amdgpu_vm_update_range()
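
The amdgpu_vm_update_range() fragments (lines 1080-1137) show two guards taken under the eviction lock before any page-table work: bail out while the VM is being evicted, and serialize against a still-pending unlocked update by fencing the root PD with last_unlocked and swapping in a fresh stub fence. Roughly — the -EBUSY value and the error label are assumptions:

	amdgpu_vm_eviction_lock(vm);
	if (vm->evicting) {
		r = -EBUSY;			/* VM is being evicted */
		goto error_unlock;
	}

	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
		struct dma_fence *tmp = dma_fence_get_stub();

		/* keep the root PD busy until the unlocked update finishes */
		amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
		swap(vm->last_unlocked, tmp);
		dma_fence_put(tmp);
	}
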
1213 void amdgpu_vm_get_memory(struct amdgpu_vm *vm, in amdgpu_vm_get_memory() argument
1216 spin_lock(&vm->status_lock); in amdgpu_vm_get_memory()
1217 memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM); in amdgpu_vm_get_memory()
1218 spin_unlock(&vm->status_lock); in amdgpu_vm_get_memory()
1237 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_update() local
1256 r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv, in amdgpu_vm_bo_update()
1257 AMDGPU_SYNC_EQ_OWNER, vm); in amdgpu_vm_bo_update()
1289 AMDGPU_SYNC_EXPLICIT, vm); in amdgpu_vm_bo_update()
1311 if (clear || amdgpu_vm_is_bo_always_valid(vm, bo)) in amdgpu_vm_bo_update()
1312 last_update = &vm->last_update; in amdgpu_vm_bo_update()
1340 r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, in amdgpu_vm_bo_update()
1353 if (amdgpu_vm_is_bo_always_valid(vm, bo)) { in amdgpu_vm_bo_update()
1473 struct amdgpu_vm *vm, in amdgpu_vm_free_mapping() argument
1490 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_prt_fini() argument
1492 struct dma_resv *resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_prt_fini()
1519 struct amdgpu_vm *vm, in amdgpu_vm_clear_freed() argument
1533 r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv, in amdgpu_vm_clear_freed()
1534 AMDGPU_SYNC_EQ_OWNER, vm); in amdgpu_vm_clear_freed()
1538 while (!list_empty(&vm->freed)) { in amdgpu_vm_clear_freed()
1539 mapping = list_first_entry(&vm->freed, in amdgpu_vm_clear_freed()
1543 r = amdgpu_vm_update_range(adev, vm, false, false, true, false, in amdgpu_vm_clear_freed()
1546 amdgpu_vm_free_mapping(adev, vm, mapping, f); in amdgpu_vm_clear_freed()
1581 struct amdgpu_vm *vm, in amdgpu_vm_handle_moved() argument
1589 spin_lock(&vm->status_lock); in amdgpu_vm_handle_moved()
1590 while (!list_empty(&vm->moved)) { in amdgpu_vm_handle_moved()
1591 bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va, in amdgpu_vm_handle_moved()
1593 spin_unlock(&vm->status_lock); in amdgpu_vm_handle_moved()
1599 spin_lock(&vm->status_lock); in amdgpu_vm_handle_moved()
1602 while (!list_empty(&vm->invalidated)) { in amdgpu_vm_handle_moved()
1603 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, in amdgpu_vm_handle_moved()
1606 spin_unlock(&vm->status_lock); in amdgpu_vm_handle_moved()
1632 if (vm->is_compute_context && in amdgpu_vm_handle_moved()
1638 spin_lock(&vm->status_lock); in amdgpu_vm_handle_moved()
1640 spin_unlock(&vm->status_lock); in amdgpu_vm_handle_moved()
1659 struct amdgpu_vm *vm, in amdgpu_vm_flush_compute_tlb() argument
1663 uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm); in amdgpu_vm_flush_compute_tlb()
1667 WARN_ON_ONCE(!vm->is_compute_context); in amdgpu_vm_flush_compute_tlb()
1674 if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq) in amdgpu_vm_flush_compute_tlb()
1682 r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type, in amdgpu_vm_flush_compute_tlb()
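
amdgpu_vm_flush_compute_tlb() (lines 1659-1682) deduplicates flushes with a single atomic64_xchg(): the exchange simultaneously claims the current sequence number and reveals the previously flushed one, so racing callers are safe and at most one of them issues the flush for any given sequence value:

	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);

	/* Already flushed up to this sequence? Then there is nothing to do. */
	if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
		return 0;
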
1706 struct amdgpu_vm *vm, in amdgpu_vm_bo_add() argument
1715 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo); in amdgpu_vm_bo_add()
1749 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_insert_map() local
1754 amdgpu_vm_it_insert(mapping, &vm->va); in amdgpu_vm_bo_insert_map()
1759 if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved) in amdgpu_vm_bo_insert_map()
1820 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_map() local
1831 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); in amdgpu_vm_bo_map()
1891 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size); in amdgpu_vm_bo_replace_map()
1929 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_unmap() local
1952 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_unmap()
1957 list_add(&mapping->list, &vm->freed); in amdgpu_vm_bo_unmap()
1959 amdgpu_vm_free_mapping(adev, vm, mapping, in amdgpu_vm_bo_unmap()
1979 struct amdgpu_vm *vm, in amdgpu_vm_bo_clear_mappings() argument
2008 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); in amdgpu_vm_bo_clear_mappings()
2039 amdgpu_vm_it_remove(tmp, &vm->va); in amdgpu_vm_bo_clear_mappings()
2048 list_add(&tmp->list, &vm->freed); in amdgpu_vm_bo_clear_mappings()
2056 amdgpu_vm_it_insert(before, &vm->va); in amdgpu_vm_bo_clear_mappings()
2060 if (amdgpu_vm_is_bo_always_valid(vm, bo) && in amdgpu_vm_bo_clear_mappings()
2071 amdgpu_vm_it_insert(after, &vm->va); in amdgpu_vm_bo_clear_mappings()
2075 if (amdgpu_vm_is_bo_always_valid(vm, bo) && in amdgpu_vm_bo_clear_mappings()
2097 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, in amdgpu_vm_bo_lookup_mapping() argument
2100 return amdgpu_vm_it_iter_first(&vm->va, addr, addr); in amdgpu_vm_bo_lookup_mapping()
2111 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket) in amdgpu_vm_bo_trace_cs() argument
2118 for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping; in amdgpu_vm_bo_trace_cs()
2148 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_del() local
2151 dma_resv_assert_held(vm->root.bo->tbo.base.resv); in amdgpu_vm_bo_del()
2155 if (amdgpu_vm_is_bo_always_valid(vm, bo)) in amdgpu_vm_bo_del()
2169 spin_lock(&vm->status_lock); in amdgpu_vm_bo_del()
2171 spin_unlock(&vm->status_lock); in amdgpu_vm_bo_del()
2175 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_del()
2178 list_add(&mapping->list, &vm->freed); in amdgpu_vm_bo_del()
2182 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_del()
2183 amdgpu_vm_free_mapping(adev, vm, mapping, in amdgpu_vm_bo_del()
2207 if (!bo_base || !bo_base->vm) in amdgpu_vm_evictable()
2215 if (!amdgpu_vm_eviction_trylock(bo_base->vm)) in amdgpu_vm_evictable()
2219 if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) { in amdgpu_vm_evictable()
2220 amdgpu_vm_eviction_unlock(bo_base->vm); in amdgpu_vm_evictable()
2224 bo_base->vm->evicting = true; in amdgpu_vm_evictable()
2225 amdgpu_vm_eviction_unlock(bo_base->vm); in amdgpu_vm_evictable()
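
amdgpu_vm_evictable() (lines 2207-2225) runs in the eviction path, where sleeping on the VM's eviction lock is not acceptable, so it uses trylock semantics: failing to take the lock, like finding last_unlocked unsignaled, simply means "not evictable right now". Approximately:

	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
		return false;			/* contended: try again later */

	if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
		amdgpu_vm_eviction_unlock(bo_base->vm);
		return false;			/* unlocked update still pending */
	}

	bo_base->vm->evicting = true;		/* commit to the eviction */
	amdgpu_vm_eviction_unlock(bo_base->vm);
	return true;
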
2242 struct amdgpu_vm *vm = bo_base->vm; in amdgpu_vm_bo_invalidate() local
2244 if (evicted && amdgpu_vm_is_bo_always_valid(vm, bo)) { in amdgpu_vm_bo_invalidate()
2255 else if (amdgpu_vm_is_bo_always_valid(vm, bo)) in amdgpu_vm_bo_invalidate()
2277 struct amdgpu_vm *vm = bo_base->vm; in amdgpu_vm_bo_move() local
2279 spin_lock(&vm->status_lock); in amdgpu_vm_bo_move()
2282 spin_unlock(&vm->status_lock); in amdgpu_vm_bo_move()
2409 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) in amdgpu_vm_wait_idle() argument
2411 timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, in amdgpu_vm_wait_idle()
2417 return dma_fence_wait_timeout(vm->last_unlocked, true, timeout); in amdgpu_vm_wait_idle()
2430 struct amdgpu_vm *vm; in amdgpu_vm_get_vm_from_pasid() local
2434 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_get_vm_from_pasid()
2437 return vm; in amdgpu_vm_get_vm_from_pasid()
2461 amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm) in amdgpu_vm_get_task_info_vm() argument
2465 if (vm) { in amdgpu_vm_get_task_info_vm()
2466 ti = vm->task_info; in amdgpu_vm_get_task_info_vm()
2467 kref_get(&vm->task_info->refcount); in amdgpu_vm_get_task_info_vm()
2489 static int amdgpu_vm_create_task_info(struct amdgpu_vm *vm) in amdgpu_vm_create_task_info() argument
2491 vm->task_info = kzalloc(sizeof(struct amdgpu_task_info), GFP_KERNEL); in amdgpu_vm_create_task_info()
2492 if (!vm->task_info) in amdgpu_vm_create_task_info()
2495 kref_init(&vm->task_info->refcount); in amdgpu_vm_create_task_info()
2504 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) in amdgpu_vm_set_task_info() argument
2506 if (!vm->task_info) in amdgpu_vm_set_task_info()
2509 if (vm->task_info->pid == current->pid) in amdgpu_vm_set_task_info()
2512 vm->task_info->pid = current->pid; in amdgpu_vm_set_task_info()
2513 get_task_comm(vm->task_info->task_name, current); in amdgpu_vm_set_task_info()
2518 vm->task_info->tgid = current->group_leader->pid; in amdgpu_vm_set_task_info()
2519 get_task_comm(vm->task_info->process_name, current->group_leader); in amdgpu_vm_set_task_info()
2534 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_init() argument
2541 vm->va = RB_ROOT_CACHED; in amdgpu_vm_init()
2543 vm->reserved_vmid[i] = NULL; in amdgpu_vm_init()
2544 INIT_LIST_HEAD(&vm->evicted); in amdgpu_vm_init()
2545 INIT_LIST_HEAD(&vm->evicted_user); in amdgpu_vm_init()
2546 INIT_LIST_HEAD(&vm->relocated); in amdgpu_vm_init()
2547 INIT_LIST_HEAD(&vm->moved); in amdgpu_vm_init()
2548 INIT_LIST_HEAD(&vm->idle); in amdgpu_vm_init()
2549 INIT_LIST_HEAD(&vm->invalidated); in amdgpu_vm_init()
2550 spin_lock_init(&vm->status_lock); in amdgpu_vm_init()
2551 INIT_LIST_HEAD(&vm->freed); in amdgpu_vm_init()
2552 INIT_LIST_HEAD(&vm->done); in amdgpu_vm_init()
2553 INIT_KFIFO(vm->faults); in amdgpu_vm_init()
2555 r = amdgpu_vm_init_entities(adev, vm); in amdgpu_vm_init()
2559 ttm_lru_bulk_move_init(&vm->lru_bulk_move); in amdgpu_vm_init()
2561 vm->is_compute_context = false; in amdgpu_vm_init()
2563 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & in amdgpu_vm_init()
2567 vm->use_cpu_for_update ? "CPU" : "SDMA"); in amdgpu_vm_init()
2568 WARN_ONCE((vm->use_cpu_for_update && in amdgpu_vm_init()
2572 if (vm->use_cpu_for_update) in amdgpu_vm_init()
2573 vm->update_funcs = &amdgpu_vm_cpu_funcs; in amdgpu_vm_init()
2575 vm->update_funcs = &amdgpu_vm_sdma_funcs; in amdgpu_vm_init()
2577 vm->last_update = dma_fence_get_stub(); in amdgpu_vm_init()
2578 vm->last_unlocked = dma_fence_get_stub(); in amdgpu_vm_init()
2579 vm->last_tlb_flush = dma_fence_get_stub(); in amdgpu_vm_init()
2580 vm->generation = amdgpu_vm_generation(adev, NULL); in amdgpu_vm_init()
2582 mutex_init(&vm->eviction_lock); in amdgpu_vm_init()
2583 vm->evicting = false; in amdgpu_vm_init()
2584 vm->tlb_fence_context = dma_fence_context_alloc(1); in amdgpu_vm_init()
2586 r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level, in amdgpu_vm_init()
2598 amdgpu_vm_bo_base_init(&vm->root, vm, root_bo); in amdgpu_vm_init()
2603 r = amdgpu_vm_pt_clear(adev, vm, root, false); in amdgpu_vm_init()
2607 r = amdgpu_vm_create_task_info(vm); in amdgpu_vm_init()
2611 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_init()
2617 amdgpu_vm_pt_free_root(adev, vm); in amdgpu_vm_init()
2618 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_init()
2622 dma_fence_put(vm->last_tlb_flush); in amdgpu_vm_init()
2623 dma_fence_put(vm->last_unlocked); in amdgpu_vm_init()
2624 ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move); in amdgpu_vm_init()
2625 amdgpu_vm_fini_entities(vm); in amdgpu_vm_init()
2649 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_make_compute() argument
2653 r = amdgpu_bo_reserve(vm->root.bo, true); in amdgpu_vm_make_compute()
2658 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & in amdgpu_vm_make_compute()
2661 vm->use_cpu_for_update ? "CPU" : "SDMA"); in amdgpu_vm_make_compute()
2662 WARN_ONCE((vm->use_cpu_for_update && in amdgpu_vm_make_compute()
2666 if (vm->use_cpu_for_update) { in amdgpu_vm_make_compute()
2668 r = amdgpu_bo_sync_wait(vm->root.bo, in amdgpu_vm_make_compute()
2673 vm->update_funcs = &amdgpu_vm_cpu_funcs; in amdgpu_vm_make_compute()
2674 r = amdgpu_vm_pt_map_tables(adev, vm); in amdgpu_vm_make_compute()
2679 vm->update_funcs = &amdgpu_vm_sdma_funcs; in amdgpu_vm_make_compute()
2682 dma_fence_put(vm->last_update); in amdgpu_vm_make_compute()
2683 vm->last_update = dma_fence_get_stub(); in amdgpu_vm_make_compute()
2684 vm->is_compute_context = true; in amdgpu_vm_make_compute()
2687 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_make_compute()
2691 static int amdgpu_vm_stats_is_zero(struct amdgpu_vm *vm) in amdgpu_vm_stats_is_zero() argument
2694 if (!(drm_memory_stats_is_zero(&vm->stats[i].drm) && in amdgpu_vm_stats_is_zero()
2695 vm->stats[i].evicted == 0)) in amdgpu_vm_stats_is_zero()
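
amdgpu_vm_stats_is_zero() (lines 2691-2695) evidently iterates all placement buckets: at teardown every bucket must have zeroed DRM stats and no evicted bytes, otherwise amdgpu_vm_fini() warns about leaked accounting (lines 2771-2772). Likely shape:

	int i;

	for (i = 0; i < __AMDGPU_PL_NUM; ++i) {
		if (!(drm_memory_stats_is_zero(&vm->stats[i].drm) &&
		      vm->stats[i].evicted == 0))
			return false;
	}
	return true;
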
2710 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_fini() argument
2718 amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm); in amdgpu_vm_fini()
2720 root = amdgpu_bo_ref(vm->root.bo); in amdgpu_vm_fini()
2722 amdgpu_vm_set_pasid(adev, vm, 0); in amdgpu_vm_fini()
2723 dma_fence_wait(vm->last_unlocked, false); in amdgpu_vm_fini()
2724 dma_fence_put(vm->last_unlocked); in amdgpu_vm_fini()
2725 dma_fence_wait(vm->last_tlb_flush, false); in amdgpu_vm_fini()
2727 spin_lock_irqsave(vm->last_tlb_flush->lock, flags); in amdgpu_vm_fini()
2728 spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags); in amdgpu_vm_fini()
2729 dma_fence_put(vm->last_tlb_flush); in amdgpu_vm_fini()
2731 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { in amdgpu_vm_fini()
2733 amdgpu_vm_prt_fini(adev, vm); in amdgpu_vm_fini()
2738 amdgpu_vm_free_mapping(adev, vm, mapping, NULL); in amdgpu_vm_fini()
2741 amdgpu_vm_pt_free_root(adev, vm); in amdgpu_vm_fini()
2744 WARN_ON(vm->root.bo); in amdgpu_vm_fini()
2746 amdgpu_vm_fini_entities(vm); in amdgpu_vm_fini()
2748 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { in amdgpu_vm_fini()
2752 &vm->va.rb_root, rb) { in amdgpu_vm_fini()
2760 dma_fence_put(vm->last_update); in amdgpu_vm_fini()
2763 if (vm->reserved_vmid[i]) { in amdgpu_vm_fini()
2765 vm->reserved_vmid[i] = false; in amdgpu_vm_fini()
2769 ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move); in amdgpu_vm_fini()
2771 if (!amdgpu_vm_stats_is_zero(vm)) { in amdgpu_vm_fini()
2772 struct amdgpu_task_info *ti = vm->task_info; in amdgpu_vm_fini()
2779 amdgpu_vm_put_task_info(vm->task_info); in amdgpu_vm_fini()
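
Note the empty lock/unlock pair on last_tlb_flush->lock in amdgpu_vm_fini() (lines 2727-2728): it is a barrier, not a mistake. After dma_fence_wait() the fence has signaled, but a signaling callback may still be executing under the fence's lock; cycling that lock guarantees all callbacks have drained before the final reference is dropped:

	unsigned long flags;

	dma_fence_wait(vm->last_tlb_flush, false);
	/* make sure no in-flight fence callback still holds the lock */
	spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
	spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
	dma_fence_put(vm->last_tlb_flush);
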
2870 if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) { in amdgpu_vm_ioctl()
2872 fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true; in amdgpu_vm_ioctl()
2877 if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) { in amdgpu_vm_ioctl()
2879 fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false; in amdgpu_vm_ioctl()
2911 struct amdgpu_vm *vm; in amdgpu_vm_handle_fault() local
2915 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_handle_fault()
2916 if (vm) { in amdgpu_vm_handle_fault()
2917 root = amdgpu_bo_ref(vm->root.bo); in amdgpu_vm_handle_fault()
2918 is_compute_context = vm->is_compute_context; in amdgpu_vm_handle_fault()
2941 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_handle_fault()
2942 if (vm && vm->root.bo != root) in amdgpu_vm_handle_fault()
2943 vm = NULL; in amdgpu_vm_handle_fault()
2945 if (!vm) in amdgpu_vm_handle_fault()
2974 r = amdgpu_vm_update_range(adev, vm, true, false, false, false, in amdgpu_vm_handle_fault()
2979 r = amdgpu_vm_update_pdes(adev, vm, true); in amdgpu_vm_handle_fault()
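
amdgpu_vm_handle_fault() (lines 2911-2979) demonstrates the lookup-revalidate pattern for the pasids xarray: a VM found under the xarray lock can disappear the moment the lock drops, so the handler pins the root BO, performs the blocking reserve, then repeats the lookup and proceeds only if the same VM — identified by the same root BO — is still registered. Condensed:

	struct amdgpu_bo *root = NULL;
	struct amdgpu_vm *vm;

	vm = xa_load(&adev->vm_manager.pasids, pasid);	/* under the xa lock */
	if (vm)
		root = amdgpu_bo_ref(vm->root.bo);	/* pin across unlock */

	/* ... drop the xa lock, amdgpu_bo_reserve(root, true) ... */

	vm = xa_load(&adev->vm_manager.pasids, pasid);	/* look up again */
	if (vm && vm->root.bo != root)
		vm = NULL;				/* pasid was reused */
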
3001 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m) in amdgpu_debugfs_vm_bo_info() argument
3018 spin_lock(&vm->status_lock); in amdgpu_debugfs_vm_bo_info()
3020 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3029 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3038 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3047 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3056 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3065 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3070 spin_unlock(&vm->status_lock); in amdgpu_debugfs_vm_bo_info()
3104 struct amdgpu_vm *vm; in amdgpu_vm_update_fault_cache() local
3109 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_update_fault_cache()
3115 if (vm && status) { in amdgpu_vm_update_fault_cache()
3116 vm->fault_info.addr = addr; in amdgpu_vm_update_fault_cache()
3117 vm->fault_info.status = status; in amdgpu_vm_update_fault_cache()
3127 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX; in amdgpu_vm_update_fault_cache()
3128 vm->fault_info.vmhub |= in amdgpu_vm_update_fault_cache()
3131 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0; in amdgpu_vm_update_fault_cache()
3132 vm->fault_info.vmhub |= in amdgpu_vm_update_fault_cache()
3135 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1; in amdgpu_vm_update_fault_cache()
3136 vm->fault_info.vmhub |= in amdgpu_vm_update_fault_cache()
3154 bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo) in amdgpu_vm_is_bo_always_valid() argument
3156 return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv; in amdgpu_vm_is_bo_always_valid()
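
Finally, amdgpu_vm_is_bo_always_valid() (line 3156) explains many branches above: a BO is "always valid" in a VM exactly when it shares the reservation object of the root page directory, so holding the PD's resv lock implicitly protects the BO. These per-VM BOs are the ones placed in the VM's bulk LRU move and updated through vm->last_update, as in amdgpu_vm_bo_base_init() (lines 447-452):

	if (!amdgpu_vm_is_bo_always_valid(vm, bo))
		return;				/* independently locked BO */

	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
	ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
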