1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/dma-fence-array.h>
29 #include <linux/interval_tree_generic.h>
30 #include <linux/idr.h>
31 
32 #include <drm/amdgpu_drm.h>
33 #include "amdgpu.h"
34 #include "amdgpu_trace.h"
35 #include "amdgpu_amdkfd.h"
36 #include "amdgpu_gmc.h"
37 #include "amdgpu_xgmi.h"
38 
39 /**
40  * DOC: GPUVM
41  *
 * GPUVM is similar to the legacy GART on older ASICs, however
 * rather than there being a single global GART table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * VRAM pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
56  * Cayman/Trinity support up to 8 active VMs at any given time;
57  * SI supports 16.
58  */
59 
60 #define START(node) ((node)->start)
61 #define LAST(node) ((node)->last)
62 
63 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
64 		     START, LAST, static, amdgpu_vm_it)
65 
66 #undef START
67 #undef LAST
68 
69 /**
 * struct amdgpu_prt_cb - Helper to disable the partially resident texture (PRT) feature from a fence callback
71  */
72 struct amdgpu_prt_cb {
73 
74 	/**
75 	 * @adev: amdgpu device
76 	 */
77 	struct amdgpu_device *adev;
78 
79 	/**
80 	 * @cb: callback
81 	 */
82 	struct dma_fence_cb cb;
83 };
84 
85 /**
86  * amdgpu_vm_level_shift - return the addr shift for each level
87  *
88  * @adev: amdgpu_device pointer
89  * @level: VMPT level
90  *
91  * Returns:
92  * The number of bits the pfn needs to be right shifted for a level.
93  */
94 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
95 				      unsigned level)
96 {
97 	unsigned shift = 0xff;
98 
99 	switch (level) {
100 	case AMDGPU_VM_PDB2:
101 	case AMDGPU_VM_PDB1:
102 	case AMDGPU_VM_PDB0:
103 		shift = 9 * (AMDGPU_VM_PDB0 - level) +
104 			adev->vm_manager.block_size;
105 		break;
106 	case AMDGPU_VM_PTB:
107 		shift = 0;
108 		break;
109 	default:
		dev_err(adev->dev, "the level %d isn't supported.\n", level);
111 	}
112 
113 	return shift;
114 }
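
/*
 * Worked example (illustration only, assuming the default
 * adev->vm_manager.block_size of 9 and a four level layout):
 *
 *   AMDGPU_VM_PDB2: 9 * 2 + 9 = 27
 *   AMDGPU_VM_PDB1: 9 * 1 + 9 = 18
 *   AMDGPU_VM_PDB0: 9 * 0 + 9 =  9
 *   AMDGPU_VM_PTB:              0
 *
 * Adding AMDGPU_GPU_PAGE_SHIFT (12) gives the first virtual address bit
 * decoded by each level, so a single PDB0 entry covers 1 << 21 bytes (2 MiB).
 */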
115 
116 /**
117  * amdgpu_vm_num_entries - return the number of entries in a PD/PT
118  *
119  * @adev: amdgpu_device pointer
120  * @level: VMPT level
121  *
122  * Returns:
123  * The number of entries in a page directory or page table.
124  */
125 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
126 				      unsigned level)
127 {
128 	unsigned shift = amdgpu_vm_level_shift(adev,
129 					       adev->vm_manager.root_level);
130 
131 	if (level == adev->vm_manager.root_level)
132 		/* For the root directory */
133 		return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
134 			>> shift;
135 	else if (level != AMDGPU_VM_PTB)
136 		/* Everything in between */
137 		return 512;
138 	else
139 		/* For the page tables on the leaves */
140 		return AMDGPU_VM_PTE_COUNT(adev);
141 }
142 
143 /**
144  * amdgpu_vm_num_ats_entries - return the number of ATS entries in the root PD
145  *
146  * @adev: amdgpu_device pointer
147  *
148  * Returns:
 * The number of entries in the root page directory which need the ATS setting.
150  */
151 static unsigned amdgpu_vm_num_ats_entries(struct amdgpu_device *adev)
152 {
153 	unsigned shift;
154 
155 	shift = amdgpu_vm_level_shift(adev, adev->vm_manager.root_level);
156 	return AMDGPU_GMC_HOLE_START >> (shift + AMDGPU_GPU_PAGE_SHIFT);
157 }
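
/*
 * Illustration (assumed values, not taken from this file): with a Vega style
 * setup where AMDGPU_GMC_HOLE_START is 1ULL << 47, the root level shift is 27
 * and GPU pages are 4 KiB, this works out to (1 << 47) >> (27 + 12) = 256,
 * i.e. the lower half of a 512 entry root page directory gets the ATS
 * default mapping.
 */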
158 
159 /**
160  * amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT
161  *
162  * @adev: amdgpu_device pointer
163  * @level: VMPT level
164  *
165  * Returns:
166  * The mask to extract the entry number of a PD/PT from an address.
167  */
168 static uint32_t amdgpu_vm_entries_mask(struct amdgpu_device *adev,
169 				       unsigned int level)
170 {
171 	if (level <= adev->vm_manager.root_level)
172 		return 0xffffffff;
173 	else if (level != AMDGPU_VM_PTB)
174 		return 0x1ff;
175 	else
176 		return AMDGPU_VM_PTE_COUNT(adev) - 1;
177 }
178 
179 /**
180  * amdgpu_vm_bo_size - returns the size of the BOs in bytes
181  *
182  * @adev: amdgpu_device pointer
183  * @level: VMPT level
184  *
185  * Returns:
186  * The size of the BO for a page directory or page table in bytes.
187  */
188 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
189 {
190 	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
191 }
192 
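/*
 * Informal overview of the per VM BO state machine implemented by the
 * helpers below: a BO (or PD/PT) sits on at most one of the vm->evicted,
 * vm->relocated, vm->moved, vm->invalidated or vm->idle lists (or on none
 * of them once it is "done").  Roughly: evicted BOs get revalidated on the
 * next VM use and become moved (per VM BOs) or relocated (PDs/PTs whose
 * parent PD needs updating); once the page tables reflect the change they
 * end up idle or done again.
 */
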
193 /**
194  * amdgpu_vm_bo_evicted - vm_bo is evicted
195  *
196  * @vm_bo: vm_bo which is evicted
197  *
198  * State for PDs/PTs and per VM BOs which are not at the location they should
199  * be.
200  */
201 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
202 {
203 	struct amdgpu_vm *vm = vm_bo->vm;
204 	struct amdgpu_bo *bo = vm_bo->bo;
205 
206 	vm_bo->moved = true;
207 	if (bo->tbo.type == ttm_bo_type_kernel)
208 		list_move(&vm_bo->vm_status, &vm->evicted);
209 	else
210 		list_move_tail(&vm_bo->vm_status, &vm->evicted);
211 }
212 
213 /**
 * amdgpu_vm_bo_relocated - vm_bo is relocated
215  *
216  * @vm_bo: vm_bo which is relocated
217  *
 * State for PDs/PTs which need to update their parent PD.
219  */
220 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
221 {
222 	list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
223 }
224 
225 /**
226  * amdgpu_vm_bo_moved - vm_bo is moved
227  *
228  * @vm_bo: vm_bo which is moved
229  *
230  * State for per VM BOs which are moved, but that change is not yet reflected
231  * in the page tables.
232  */
233 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
234 {
235 	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
236 }
237 
238 /**
239  * amdgpu_vm_bo_idle - vm_bo is idle
240  *
241  * @vm_bo: vm_bo which is now idle
242  *
243  * State for PDs/PTs and per VM BOs which have gone through the state machine
244  * and are now idle.
245  */
246 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
247 {
248 	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
249 	vm_bo->moved = false;
250 }
251 
252 /**
253  * amdgpu_vm_bo_invalidated - vm_bo is invalidated
254  *
255  * @vm_bo: vm_bo which is now invalidated
256  *
 * State for normal BOs which are invalidated and that change is not yet reflected
258  * in the PTs.
259  */
260 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
261 {
262 	spin_lock(&vm_bo->vm->invalidated_lock);
263 	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
264 	spin_unlock(&vm_bo->vm->invalidated_lock);
265 }
266 
267 /**
268  * amdgpu_vm_bo_done - vm_bo is done
269  *
270  * @vm_bo: vm_bo which is now done
271  *
272  * State for normal BOs which are invalidated and that change has been updated
273  * in the PTs.
274  */
275 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
276 {
277 	spin_lock(&vm_bo->vm->invalidated_lock);
278 	list_del_init(&vm_bo->vm_status);
279 	spin_unlock(&vm_bo->vm->invalidated_lock);
280 }
281 
282 /**
283  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
284  *
285  * @base: base structure for tracking BO usage in a VM
286  * @vm: vm to which bo is to be added
287  * @bo: amdgpu buffer object
288  *
 * Initialize a bo_va_base structure and add it to the appropriate lists.
291  */
292 static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
293 				   struct amdgpu_vm *vm,
294 				   struct amdgpu_bo *bo)
295 {
296 	base->vm = vm;
297 	base->bo = bo;
298 	base->next = NULL;
299 	INIT_LIST_HEAD(&base->vm_status);
300 
301 	if (!bo)
302 		return;
303 	base->next = bo->vm_bo;
304 	bo->vm_bo = base;
305 
306 	if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
307 		return;
308 
309 	vm->bulk_moveable = false;
310 	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
311 		amdgpu_vm_bo_relocated(base);
312 	else
313 		amdgpu_vm_bo_idle(base);
314 
315 	if (bo->preferred_domains &
316 	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
317 		return;
318 
	/*
	 * We checked all the prerequisites, but it looks like this per VM BO
	 * is currently evicted. Add the BO to the evicted list to make sure it
	 * is validated on next VM use to avoid faults.
	 */
324 	amdgpu_vm_bo_evicted(base);
325 }
326 
327 /**
328  * amdgpu_vm_pt_parent - get the parent page directory
329  *
330  * @pt: child page table
331  *
332  * Helper to get the parent entry for the child page table. NULL if we are at
333  * the root page directory.
334  */
335 static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
336 {
337 	struct amdgpu_bo *parent = pt->base.bo->parent;
338 
339 	if (!parent)
340 		return NULL;
341 
342 	return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
343 }
344 
345 /*
346  * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
347  */
348 struct amdgpu_vm_pt_cursor {
349 	uint64_t pfn;
350 	struct amdgpu_vm_pt *parent;
351 	struct amdgpu_vm_pt *entry;
352 	unsigned level;
353 };
354 
355 /**
356  * amdgpu_vm_pt_start - start PD/PT walk
357  *
358  * @adev: amdgpu_device pointer
359  * @vm: amdgpu_vm structure
360  * @start: start address of the walk
361  * @cursor: state to initialize
362  *
 * Initialize an amdgpu_vm_pt_cursor to start a walk.
364  */
365 static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
366 			       struct amdgpu_vm *vm, uint64_t start,
367 			       struct amdgpu_vm_pt_cursor *cursor)
368 {
369 	cursor->pfn = start;
370 	cursor->parent = NULL;
371 	cursor->entry = &vm->root;
372 	cursor->level = adev->vm_manager.root_level;
373 }
374 
375 /**
376  * amdgpu_vm_pt_descendant - go to child node
377  *
378  * @adev: amdgpu_device pointer
379  * @cursor: current state
380  *
381  * Walk to the child node of the current node.
382  * Returns:
383  * True if the walk was possible, false otherwise.
384  */
385 static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
386 				    struct amdgpu_vm_pt_cursor *cursor)
387 {
388 	unsigned mask, shift, idx;
389 
390 	if (!cursor->entry->entries)
391 		return false;
392 
393 	BUG_ON(!cursor->entry->base.bo);
394 	mask = amdgpu_vm_entries_mask(adev, cursor->level);
395 	shift = amdgpu_vm_level_shift(adev, cursor->level);
396 
397 	++cursor->level;
398 	idx = (cursor->pfn >> shift) & mask;
399 	cursor->parent = cursor->entry;
400 	cursor->entry = &cursor->entry->entries[idx];
401 	return true;
402 }
403 
404 /**
405  * amdgpu_vm_pt_sibling - go to sibling node
406  *
407  * @adev: amdgpu_device pointer
408  * @cursor: current state
409  *
410  * Walk to the sibling node of the current node.
411  * Returns:
412  * True if the walk was possible, false otherwise.
413  */
414 static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
415 				 struct amdgpu_vm_pt_cursor *cursor)
416 {
417 	unsigned shift, num_entries;
418 
419 	/* Root doesn't have a sibling */
420 	if (!cursor->parent)
421 		return false;
422 
	/* Go to our parent and see if we have a sibling */
424 	shift = amdgpu_vm_level_shift(adev, cursor->level - 1);
425 	num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1);
426 
427 	if (cursor->entry == &cursor->parent->entries[num_entries - 1])
428 		return false;
429 
430 	cursor->pfn += 1ULL << shift;
431 	cursor->pfn &= ~((1ULL << shift) - 1);
432 	++cursor->entry;
433 	return true;
434 }
435 
436 /**
437  * amdgpu_vm_pt_ancestor - go to parent node
438  *
439  * @cursor: current state
440  *
441  * Walk to the parent node of the current node.
442  * Returns:
443  * True if the walk was possible, false otherwise.
444  */
445 static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
446 {
447 	if (!cursor->parent)
448 		return false;
449 
450 	--cursor->level;
451 	cursor->entry = cursor->parent;
452 	cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
453 	return true;
454 }
455 
456 /**
 * amdgpu_vm_pt_next - get next PD/PT in hierarchy
458  *
459  * @adev: amdgpu_device pointer
460  * @cursor: current state
461  *
462  * Walk the PD/PT tree to the next node.
463  */
464 static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
465 			      struct amdgpu_vm_pt_cursor *cursor)
466 {
467 	/* First try a newborn child */
468 	if (amdgpu_vm_pt_descendant(adev, cursor))
469 		return;
470 
	/* If that didn't work, try to find a sibling */
472 	while (!amdgpu_vm_pt_sibling(adev, cursor)) {
473 		/* No sibling, go to our parents and grandparents */
474 		if (!amdgpu_vm_pt_ancestor(cursor)) {
475 			cursor->pfn = ~0ll;
476 			return;
477 		}
478 	}
479 }
480 
481 /**
 * amdgpu_vm_pt_first_dfs - start a depth-first search
483  *
484  * @adev: amdgpu_device structure
485  * @vm: amdgpu_vm structure
486  * @start: optional cursor to start with
487  * @cursor: state to initialize
488  *
 * Starts a depth-first traversal of the PD/PT tree.
490  */
491 static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
492 				   struct amdgpu_vm *vm,
493 				   struct amdgpu_vm_pt_cursor *start,
494 				   struct amdgpu_vm_pt_cursor *cursor)
495 {
496 	if (start)
497 		*cursor = *start;
498 	else
499 		amdgpu_vm_pt_start(adev, vm, 0, cursor);
500 	while (amdgpu_vm_pt_descendant(adev, cursor));
501 }
502 
503 /**
 * amdgpu_vm_pt_continue_dfs - check if the depth-first search should continue
505  *
506  * @start: starting point for the search
507  * @entry: current entry
508  *
509  * Returns:
510  * True when the search should continue, false otherwise.
511  */
512 static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
513 				      struct amdgpu_vm_pt *entry)
514 {
515 	return entry && (!start || entry != start->entry);
516 }
517 
518 /**
 * amdgpu_vm_pt_next_dfs - get the next node for a depth-first search
520  *
521  * @adev: amdgpu_device structure
522  * @cursor: current state
523  *
 * Move the cursor to the next node in a depth-first search.
525  */
526 static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
527 				  struct amdgpu_vm_pt_cursor *cursor)
528 {
529 	if (!cursor->entry)
530 		return;
531 
532 	if (!cursor->parent)
533 		cursor->entry = NULL;
534 	else if (amdgpu_vm_pt_sibling(adev, cursor))
535 		while (amdgpu_vm_pt_descendant(adev, cursor));
536 	else
537 		amdgpu_vm_pt_ancestor(cursor);
538 }
539 
540 /*
 * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
542  */
543 #define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)		\
544 	for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)),		\
545 	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
546 	     amdgpu_vm_pt_continue_dfs((start), (entry));			\
547 	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
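
/*
 * Typical usage sketch (see e.g. amdgpu_vm_free_pts() further down): the
 * iterator visits children before their parents, so tear down is safe even
 * when the current entry is freed inside the loop body:
 *
 *	struct amdgpu_vm_pt_cursor cursor;
 *	struct amdgpu_vm_pt *entry;
 *
 *	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
 *		amdgpu_vm_free_table(entry);
 */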
548 
549 /**
550  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
551  *
552  * @vm: vm providing the BOs
553  * @validated: head of validation list
554  * @entry: entry to add
555  *
556  * Add the page directory to the list of BOs to
557  * validate for command submission.
558  */
559 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
560 			 struct list_head *validated,
561 			 struct amdgpu_bo_list_entry *entry)
562 {
563 	entry->priority = 0;
564 	entry->tv.bo = &vm->root.base.bo->tbo;
565 	/* One for the VM updates, one for TTM and one for the CS job */
566 	entry->tv.num_shared = 3;
567 	entry->user_pages = NULL;
568 	list_add(&entry->tv.head, validated);
569 }
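
/*
 * Usage sketch (hypothetical caller, modelled on the CS ioctl): the entry is
 * owned by the caller and is simply added to its ttm_eu validation list:
 *
 *	struct amdgpu_bo_list_entry vm_pd;
 *	struct list_head validated;
 *
 *	INIT_LIST_HEAD(&validated);
 *	amdgpu_vm_get_pd_bo(vm, &validated, &vm_pd);
 *	// ... then reserve everything, e.g. via ttm_eu_reserve_buffers()
 */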
570 
571 /**
572  * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag
573  *
574  * @bo: BO which was removed from the LRU
575  *
576  * Make sure the bulk_moveable flag is updated when a BO is removed from the
577  * LRU.
578  */
579 void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
580 {
581 	struct amdgpu_bo *abo;
582 	struct amdgpu_vm_bo_base *bo_base;
583 
584 	if (!amdgpu_bo_is_amdgpu_bo(bo))
585 		return;
586 
587 	if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)
588 		return;
589 
590 	abo = ttm_to_amdgpu_bo(bo);
591 	if (!abo->parent)
592 		return;
593 	for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
594 		struct amdgpu_vm *vm = bo_base->vm;
595 
596 		if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
597 			vm->bulk_moveable = false;
598 	}
}

/**
602  * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
603  *
604  * @adev: amdgpu device pointer
605  * @vm: vm providing the BOs
606  *
607  * Move all BOs to the end of LRU and remember their positions to put them
608  * together.
609  */
610 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
611 				struct amdgpu_vm *vm)
612 {
613 	struct amdgpu_vm_bo_base *bo_base;
614 
615 	if (vm->bulk_moveable) {
616 		spin_lock(&ttm_bo_glob.lru_lock);
617 		ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
618 		spin_unlock(&ttm_bo_glob.lru_lock);
619 		return;
620 	}
621 
622 	memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
623 
624 	spin_lock(&ttm_bo_glob.lru_lock);
625 	list_for_each_entry(bo_base, &vm->idle, vm_status) {
626 		struct amdgpu_bo *bo = bo_base->bo;
627 
628 		if (!bo->parent)
629 			continue;
630 
631 		ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
632 		if (bo->shadow)
633 			ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
634 						&vm->lru_bulk_move);
635 	}
636 	spin_unlock(&ttm_bo_glob.lru_lock);
637 
638 	vm->bulk_moveable = true;
639 }
640 
641 /**
642  * amdgpu_vm_validate_pt_bos - validate the page table BOs
643  *
644  * @adev: amdgpu device pointer
645  * @vm: vm providing the BOs
646  * @validate: callback to do the validation
647  * @param: parameter for the validation callback
648  *
 * Validate the page table BOs on command submission if necessary.
650  *
651  * Returns:
652  * Validation result.
653  */
654 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
655 			      int (*validate)(void *p, struct amdgpu_bo *bo),
656 			      void *param)
657 {
658 	struct amdgpu_vm_bo_base *bo_base, *tmp;
659 	int r = 0;
660 
661 	vm->bulk_moveable &= list_empty(&vm->evicted);
662 
663 	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
664 		struct amdgpu_bo *bo = bo_base->bo;
665 
666 		r = validate(param, bo);
667 		if (r)
668 			break;
669 
670 		if (bo->tbo.type != ttm_bo_type_kernel) {
671 			amdgpu_vm_bo_moved(bo_base);
672 		} else {
673 			vm->update_funcs->map_table(bo);
674 			if (bo->parent)
675 				amdgpu_vm_bo_relocated(bo_base);
676 			else
677 				amdgpu_vm_bo_idle(bo_base);
678 		}
679 	}
680 
681 	return r;
682 }
683 
684 /**
685  * amdgpu_vm_ready - check VM is ready for updates
686  *
687  * @vm: VM to check
688  *
689  * Check if all VM PDs/PTs are ready for updates
690  *
691  * Returns:
692  * True if eviction list is empty.
693  */
694 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
695 {
696 	return list_empty(&vm->evicted);
697 }
698 
699 /**
700  * amdgpu_vm_clear_bo - initially clear the PDs/PTs
701  *
702  * @adev: amdgpu_device pointer
703  * @vm: VM to clear BO from
704  * @bo: BO to clear
705  * @direct: use a direct update
706  *
707  * Root PD needs to be reserved when calling this.
708  *
709  * Returns:
710  * 0 on success, errno otherwise.
711  */
712 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
713 			      struct amdgpu_vm *vm,
714 			      struct amdgpu_bo *bo,
715 			      bool direct)
716 {
717 	struct ttm_operation_ctx ctx = { true, false };
718 	unsigned level = adev->vm_manager.root_level;
719 	struct amdgpu_vm_update_params params;
720 	struct amdgpu_bo *ancestor = bo;
721 	unsigned entries, ats_entries;
722 	uint64_t addr;
723 	int r;
724 
725 	/* Figure out our place in the hierarchy */
726 	if (ancestor->parent) {
727 		++level;
728 		while (ancestor->parent->parent) {
729 			++level;
730 			ancestor = ancestor->parent;
731 		}
732 	}
733 
734 	entries = amdgpu_bo_size(bo) / 8;
735 	if (!vm->pte_support_ats) {
736 		ats_entries = 0;
737 
738 	} else if (!bo->parent) {
739 		ats_entries = amdgpu_vm_num_ats_entries(adev);
740 		ats_entries = min(ats_entries, entries);
741 		entries -= ats_entries;
742 
743 	} else {
744 		struct amdgpu_vm_pt *pt;
745 
746 		pt = container_of(ancestor->vm_bo, struct amdgpu_vm_pt, base);
747 		ats_entries = amdgpu_vm_num_ats_entries(adev);
748 		if ((pt - vm->root.entries) >= ats_entries) {
749 			ats_entries = 0;
750 		} else {
751 			ats_entries = entries;
752 			entries = 0;
753 		}
754 	}
755 
756 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
757 	if (r)
758 		return r;
759 
760 	if (bo->shadow) {
761 		r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement,
762 				    &ctx);
763 		if (r)
764 			return r;
765 	}
766 
767 	r = vm->update_funcs->map_table(bo);
768 	if (r)
769 		return r;
770 
771 	memset(&params, 0, sizeof(params));
772 	params.adev = adev;
773 	params.vm = vm;
774 	params.direct = direct;
775 
776 	r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
777 	if (r)
778 		return r;
779 
780 	addr = 0;
781 	if (ats_entries) {
782 		uint64_t value = 0, flags;
783 
784 		flags = AMDGPU_PTE_DEFAULT_ATC;
785 		if (level != AMDGPU_VM_PTB) {
786 			/* Handle leaf PDEs as PTEs */
787 			flags |= AMDGPU_PDE_PTE;
788 			amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
789 		}
790 
791 		r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries,
792 					     value, flags);
793 		if (r)
794 			return r;
795 
796 		addr += ats_entries * 8;
797 	}
798 
799 	if (entries) {
800 		uint64_t value = 0, flags = 0;
801 
802 		if (adev->asic_type >= CHIP_VEGA10) {
803 			if (level != AMDGPU_VM_PTB) {
804 				/* Handle leaf PDEs as PTEs */
805 				flags |= AMDGPU_PDE_PTE;
806 				amdgpu_gmc_get_vm_pde(adev, level,
807 						      &value, &flags);
808 			} else {
809 				/* Workaround for fault priority problem on GMC9 */
810 				flags = AMDGPU_PTE_EXECUTABLE;
811 			}
812 		}
813 
814 		r = vm->update_funcs->update(&params, bo, addr, 0, entries,
815 					     value, flags);
816 		if (r)
817 			return r;
818 	}
819 
820 	return vm->update_funcs->commit(&params, NULL);
821 }
822 
823 /**
824  * amdgpu_vm_bo_param - fill in parameters for PD/PT allocation
825  *
826  * @adev: amdgpu_device pointer
827  * @vm: requesting vm
828  * @level: the page table level
829  * @direct: use a direct update
830  * @bp: resulting BO allocation parameters
831  */
832 static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
833 			       int level, bool direct,
834 			       struct amdgpu_bo_param *bp)
835 {
836 	memset(bp, 0, sizeof(*bp));
837 
838 	bp->size = amdgpu_vm_bo_size(adev, level);
839 	bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
840 	bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
841 	bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
842 	bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
843 		AMDGPU_GEM_CREATE_CPU_GTT_USWC;
844 	if (vm->use_cpu_for_update)
845 		bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
846 	else if (!vm->root.base.bo || vm->root.base.bo->shadow)
847 		bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
848 	bp->type = ttm_bo_type_kernel;
849 	bp->no_wait_gpu = direct;
850 	if (vm->root.base.bo)
851 		bp->resv = vm->root.base.bo->tbo.base.resv;
852 }
853 
854 /**
855  * amdgpu_vm_alloc_pts - Allocate a specific page table
856  *
857  * @adev: amdgpu_device pointer
858  * @vm: VM to allocate page tables for
859  * @cursor: Which page table to allocate
860  * @direct: use a direct update
861  *
862  * Make sure a specific page table or directory is allocated.
863  *
864  * Returns:
865  * 1 if page table needed to be allocated, 0 if page table was already
866  * allocated, negative errno if an error occurred.
867  */
868 static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
869 			       struct amdgpu_vm *vm,
870 			       struct amdgpu_vm_pt_cursor *cursor,
871 			       bool direct)
872 {
873 	struct amdgpu_vm_pt *entry = cursor->entry;
874 	struct amdgpu_bo_param bp;
875 	struct amdgpu_bo *pt;
876 	int r;
877 
878 	if (cursor->level < AMDGPU_VM_PTB && !entry->entries) {
879 		unsigned num_entries;
880 
881 		num_entries = amdgpu_vm_num_entries(adev, cursor->level);
882 		entry->entries = kvmalloc_array(num_entries,
883 						sizeof(*entry->entries),
884 						GFP_KERNEL | __GFP_ZERO);
885 		if (!entry->entries)
886 			return -ENOMEM;
887 	}
888 
889 	if (entry->base.bo)
890 		return 0;
891 
892 	amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp);
893 
894 	r = amdgpu_bo_create(adev, &bp, &pt);
895 	if (r)
896 		return r;
897 
	/* Keep a reference to the parent directory to avoid freeing
	 * the page tables in the wrong order.
	 */
901 	pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
902 	amdgpu_vm_bo_base_init(&entry->base, vm, pt);
903 
904 	r = amdgpu_vm_clear_bo(adev, vm, pt, direct);
905 	if (r)
906 		goto error_free_pt;
907 
908 	return 0;
909 
910 error_free_pt:
911 	amdgpu_bo_unref(&pt->shadow);
912 	amdgpu_bo_unref(&pt);
913 	return r;
914 }
915 
916 /**
 * amdgpu_vm_free_table - free one PD/PT
918  *
919  * @entry: PDE to free
920  */
921 static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry)
922 {
923 	if (entry->base.bo) {
924 		entry->base.bo->vm_bo = NULL;
925 		list_del(&entry->base.vm_status);
926 		amdgpu_bo_unref(&entry->base.bo->shadow);
927 		amdgpu_bo_unref(&entry->base.bo);
928 	}
929 	kvfree(entry->entries);
930 	entry->entries = NULL;
931 }
932 
933 /**
934  * amdgpu_vm_free_pts - free PD/PT levels
935  *
936  * @adev: amdgpu device structure
937  * @vm: amdgpu vm structure
938  * @start: optional cursor where to start freeing PDs/PTs
939  *
940  * Free the page directory or page table level and all sub levels.
941  */
942 static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
943 			       struct amdgpu_vm *vm,
944 			       struct amdgpu_vm_pt_cursor *start)
945 {
946 	struct amdgpu_vm_pt_cursor cursor;
947 	struct amdgpu_vm_pt *entry;
948 
949 	vm->bulk_moveable = false;
950 
951 	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
952 		amdgpu_vm_free_table(entry);
953 
954 	if (start)
955 		amdgpu_vm_free_table(start->entry);
956 }
957 
958 /**
959  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
960  *
961  * @adev: amdgpu_device pointer
962  */
963 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
964 {
965 	const struct amdgpu_ip_block *ip_block;
966 	bool has_compute_vm_bug;
967 	struct amdgpu_ring *ring;
968 	int i;
969 
970 	has_compute_vm_bug = false;
971 
972 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
973 	if (ip_block) {
		/* Compute has a VM bug for GFX version < 7.
		 * Compute has a VM bug for GFX 8 MEC firmware version < 673.
		 */
976 		if (ip_block->version->major <= 7)
977 			has_compute_vm_bug = true;
978 		else if (ip_block->version->major == 8)
979 			if (adev->gfx.mec_fw_version < 673)
980 				has_compute_vm_bug = true;
981 	}
982 
983 	for (i = 0; i < adev->num_rings; i++) {
984 		ring = adev->rings[i];
985 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
986 			/* only compute rings */
987 			ring->has_compute_vm_bug = has_compute_vm_bug;
988 		else
989 			ring->has_compute_vm_bug = false;
990 	}
991 }
992 
993 /**
994  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
995  *
996  * @ring: ring on which the job will be submitted
997  * @job: job to submit
998  *
999  * Returns:
1000  * True if sync is needed.
1001  */
1002 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
1003 				  struct amdgpu_job *job)
1004 {
1005 	struct amdgpu_device *adev = ring->adev;
1006 	unsigned vmhub = ring->funcs->vmhub;
1007 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
1008 	struct amdgpu_vmid *id;
1009 	bool gds_switch_needed;
1010 	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
1011 
1012 	if (job->vmid == 0)
1013 		return false;
1014 	id = &id_mgr->ids[job->vmid];
1015 	gds_switch_needed = ring->funcs->emit_gds_switch && (
1016 		id->gds_base != job->gds_base ||
1017 		id->gds_size != job->gds_size ||
1018 		id->gws_base != job->gws_base ||
1019 		id->gws_size != job->gws_size ||
1020 		id->oa_base != job->oa_base ||
1021 		id->oa_size != job->oa_size);
1022 
1023 	if (amdgpu_vmid_had_gpu_reset(adev, id))
1024 		return true;
1025 
1026 	return vm_flush_needed || gds_switch_needed;
1027 }
1028 
1029 /**
1030  * amdgpu_vm_flush - hardware flush the vm
1031  *
1032  * @ring: ring to use for flush
1033  * @job:  related job
1034  * @need_pipe_sync: is pipe sync needed
1035  *
1036  * Emit a VM flush when it is necessary.
1037  *
1038  * Returns:
1039  * 0 on success, errno otherwise.
1040  */
1041 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
1042 		    bool need_pipe_sync)
1043 {
1044 	struct amdgpu_device *adev = ring->adev;
1045 	unsigned vmhub = ring->funcs->vmhub;
1046 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
1047 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
1048 	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
1049 		id->gds_base != job->gds_base ||
1050 		id->gds_size != job->gds_size ||
1051 		id->gws_base != job->gws_base ||
1052 		id->gws_size != job->gws_size ||
1053 		id->oa_base != job->oa_base ||
1054 		id->oa_size != job->oa_size);
1055 	bool vm_flush_needed = job->vm_needs_flush;
1056 	struct dma_fence *fence = NULL;
1057 	bool pasid_mapping_needed = false;
1058 	unsigned patch_offset = 0;
1059 	int r;
1060 
1061 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
1062 		gds_switch_needed = true;
1063 		vm_flush_needed = true;
1064 		pasid_mapping_needed = true;
1065 	}
1066 
1067 	mutex_lock(&id_mgr->lock);
1068 	if (id->pasid != job->pasid || !id->pasid_mapping ||
1069 	    !dma_fence_is_signaled(id->pasid_mapping))
1070 		pasid_mapping_needed = true;
1071 	mutex_unlock(&id_mgr->lock);
1072 
1073 	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
1074 	vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&
1075 			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
1076 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
1077 		ring->funcs->emit_wreg;
1078 
1079 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
1080 		return 0;
1081 
1082 	if (ring->funcs->init_cond_exec)
1083 		patch_offset = amdgpu_ring_init_cond_exec(ring);
1084 
1085 	if (need_pipe_sync)
1086 		amdgpu_ring_emit_pipeline_sync(ring);
1087 
1088 	if (vm_flush_needed) {
1089 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
1090 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
1091 	}
1092 
1093 	if (pasid_mapping_needed)
1094 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
1095 
1096 	if (vm_flush_needed || pasid_mapping_needed) {
1097 		r = amdgpu_fence_emit(ring, &fence, 0);
1098 		if (r)
1099 			return r;
1100 	}
1101 
1102 	if (vm_flush_needed) {
1103 		mutex_lock(&id_mgr->lock);
1104 		dma_fence_put(id->last_flush);
1105 		id->last_flush = dma_fence_get(fence);
1106 		id->current_gpu_reset_count =
1107 			atomic_read(&adev->gpu_reset_counter);
1108 		mutex_unlock(&id_mgr->lock);
1109 	}
1110 
1111 	if (pasid_mapping_needed) {
1112 		mutex_lock(&id_mgr->lock);
1113 		id->pasid = job->pasid;
1114 		dma_fence_put(id->pasid_mapping);
1115 		id->pasid_mapping = dma_fence_get(fence);
1116 		mutex_unlock(&id_mgr->lock);
1117 	}
1118 	dma_fence_put(fence);
1119 
1120 	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
1121 		id->gds_base = job->gds_base;
1122 		id->gds_size = job->gds_size;
1123 		id->gws_base = job->gws_base;
1124 		id->gws_size = job->gws_size;
1125 		id->oa_base = job->oa_base;
1126 		id->oa_size = job->oa_size;
1127 		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
1128 					    job->gds_size, job->gws_base,
1129 					    job->gws_size, job->oa_base,
1130 					    job->oa_size);
1131 	}
1132 
1133 	if (ring->funcs->patch_cond_exec)
1134 		amdgpu_ring_patch_cond_exec(ring, patch_offset);
1135 
1136 	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
1137 	if (ring->funcs->emit_switch_buffer) {
1138 		amdgpu_ring_emit_switch_buffer(ring);
1139 		amdgpu_ring_emit_switch_buffer(ring);
1140 	}
1141 	return 0;
1142 }
1143 
1144 /**
1145  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
1146  *
1147  * @vm: requested vm
1148  * @bo: requested buffer object
1149  *
1150  * Find @bo inside the requested vm.
 * Search inside the @bo's VM list for the requested VM.
 * Returns the found bo_va or NULL if none is found.
1153  *
1154  * Object has to be reserved!
1155  *
1156  * Returns:
1157  * Found bo_va or NULL.
1158  */
1159 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
1160 				       struct amdgpu_bo *bo)
1161 {
1162 	struct amdgpu_vm_bo_base *base;
1163 
1164 	for (base = bo->vm_bo; base; base = base->next) {
1165 		if (base->vm != vm)
1166 			continue;
1167 
1168 		return container_of(base, struct amdgpu_bo_va, base);
1169 	}
1170 	return NULL;
1171 }
1172 
1173 /**
1174  * amdgpu_vm_map_gart - Resolve gart mapping of addr
1175  *
1176  * @pages_addr: optional DMA address to use for lookup
1177  * @addr: the unmapped addr
1178  *
1179  * Look up the physical address of the page that the pte resolves
1180  * to.
1181  *
1182  * Returns:
1183  * The pointer for the page table entry.
1184  */
1185 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
1186 {
1187 	uint64_t result;
1188 
1189 	/* page table offset */
1190 	result = pages_addr[addr >> PAGE_SHIFT];
1191 
	/* in case cpu page size != gpu page size */
1193 	result |= addr & (~PAGE_MASK);
1194 
1195 	result &= 0xFFFFFFFFFFFFF000ULL;
1196 
1197 	return result;
1198 }
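
/*
 * Example (illustration only, assuming 64 KiB CPU pages and 4 KiB GPU
 * pages): for addr = 0x13000 the lookup uses pages_addr[0x13000 >> 16] =
 * pages_addr[1], the in-page offset 0x3000 is added back, and the final
 * mask clears the low 12 bits so the result stays GPU page aligned.
 * With 4 KiB CPU pages the offset term is always masked away again.
 */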
1199 
1200 /**
1201  * amdgpu_vm_update_pde - update a single level in the hierarchy
1202  *
1203  * @params: parameters for the update
1204  * @vm: requested vm
1205  * @entry: entry to update
1206  *
1207  * Makes sure the requested entry in parent is up to date.
1208  */
1209 static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
1210 				struct amdgpu_vm *vm,
1211 				struct amdgpu_vm_pt *entry)
1212 {
1213 	struct amdgpu_vm_pt *parent = amdgpu_vm_pt_parent(entry);
1214 	struct amdgpu_bo *bo = parent->base.bo, *pbo;
1215 	uint64_t pde, pt, flags;
1216 	unsigned level;
1217 
1218 	for (level = 0, pbo = bo->parent; pbo; ++level)
1219 		pbo = pbo->parent;
1220 
1221 	level += params->adev->vm_manager.root_level;
1222 	amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
1223 	pde = (entry - parent->entries) * 8;
1224 	return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags);
1225 }
1226 
1227 /**
1228  * amdgpu_vm_invalidate_pds - mark all PDs as invalid
1229  *
1230  * @adev: amdgpu_device pointer
1231  * @vm: related vm
1232  *
 * Mark all PD levels as invalid after an error.
1234  */
1235 static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
1236 				     struct amdgpu_vm *vm)
1237 {
1238 	struct amdgpu_vm_pt_cursor cursor;
1239 	struct amdgpu_vm_pt *entry;
1240 
1241 	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
1242 		if (entry->base.bo && !entry->base.moved)
1243 			amdgpu_vm_bo_relocated(&entry->base);
1244 }
1245 
1246 /**
1247  * amdgpu_vm_update_pdes - make sure that all directories are valid
1248  *
1249  * @adev: amdgpu_device pointer
1250  * @vm: requested vm
1251  * @direct: submit directly to the paging queue
1252  *
1253  * Makes sure all directories are up to date.
1254  *
1255  * Returns:
1256  * 0 for success, error for failure.
1257  */
1258 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
1259 			  struct amdgpu_vm *vm, bool direct)
1260 {
1261 	struct amdgpu_vm_update_params params;
1262 	int r;
1263 
1264 	if (list_empty(&vm->relocated))
1265 		return 0;
1266 
1267 	memset(&params, 0, sizeof(params));
1268 	params.adev = adev;
1269 	params.vm = vm;
1270 	params.direct = direct;
1271 
1272 	r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
1273 	if (r)
1274 		return r;
1275 
1276 	while (!list_empty(&vm->relocated)) {
1277 		struct amdgpu_vm_pt *entry;
1278 
1279 		entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
1280 					 base.vm_status);
1281 		amdgpu_vm_bo_idle(&entry->base);
1282 
1283 		r = amdgpu_vm_update_pde(&params, vm, entry);
1284 		if (r)
1285 			goto error;
1286 	}
1287 
1288 	r = vm->update_funcs->commit(&params, &vm->last_update);
1289 	if (r)
1290 		goto error;
1291 	return 0;
1292 
1293 error:
1294 	amdgpu_vm_invalidate_pds(adev, vm);
1295 	return r;
1296 }
1297 
1298 /*
1299  * amdgpu_vm_update_flags - figure out flags for PTE updates
1300  *
1301  * Make sure to set the right flags for the PTEs at the desired level.
1302  */
1303 static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
1304 				   struct amdgpu_bo *bo, unsigned level,
1305 				   uint64_t pe, uint64_t addr,
1306 				   unsigned count, uint32_t incr,
1307 				   uint64_t flags)
1308 
1309 {
1310 	if (level != AMDGPU_VM_PTB) {
1311 		flags |= AMDGPU_PDE_PTE;
1312 		amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags);
1313 
1314 	} else if (params->adev->asic_type >= CHIP_VEGA10 &&
1315 		   !(flags & AMDGPU_PTE_VALID) &&
1316 		   !(flags & AMDGPU_PTE_PRT)) {
1317 
1318 		/* Workaround for fault priority problem on GMC9 */
1319 		flags |= AMDGPU_PTE_EXECUTABLE;
1320 	}
1321 
1322 	params->vm->update_funcs->update(params, bo, pe, addr, count, incr,
1323 					 flags);
1324 }
1325 
1326 /**
1327  * amdgpu_vm_fragment - get fragment for PTEs
1328  *
1329  * @params: see amdgpu_vm_update_params definition
1330  * @start: first PTE to handle
1331  * @end: last PTE to handle
1332  * @flags: hw mapping flags
1333  * @frag: resulting fragment size
1334  * @frag_end: end of this fragment
1335  *
1336  * Returns the first possible fragment for the start and end address.
1337  */
1338 static void amdgpu_vm_fragment(struct amdgpu_vm_update_params *params,
1339 			       uint64_t start, uint64_t end, uint64_t flags,
1340 			       unsigned int *frag, uint64_t *frag_end)
1341 {
1342 	/**
1343 	 * The MC L1 TLB supports variable sized pages, based on a fragment
1344 	 * field in the PTE. When this field is set to a non-zero value, page
1345 	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1346 	 * flags are considered valid for all PTEs within the fragment range
1347 	 * and corresponding mappings are assumed to be physically contiguous.
1348 	 *
1349 	 * The L1 TLB can store a single PTE for the whole fragment,
1350 	 * significantly increasing the space available for translation
1351 	 * caching. This leads to large improvements in throughput when the
1352 	 * TLB is under pressure.
1353 	 *
1354 	 * The L2 TLB distributes small and large fragments into two
1355 	 * asymmetric partitions. The large fragment cache is significantly
1356 	 * larger. Thus, we try to use large fragments wherever possible.
1357 	 * Userspace can support this by aligning virtual base address and
1358 	 * allocation size to the fragment size.
1359 	 *
1360 	 * Starting with Vega10 the fragment size only controls the L1. The L2
	 * is now directly fed with small/huge/giant pages from the walker.
1362 	 */
1363 	unsigned max_frag;
1364 
1365 	if (params->adev->asic_type < CHIP_VEGA10)
1366 		max_frag = params->adev->vm_manager.fragment_size;
1367 	else
1368 		max_frag = 31;
1369 
	/* system pages are not physically contiguous */
1371 	if (params->pages_addr) {
1372 		*frag = 0;
1373 		*frag_end = end;
1374 		return;
1375 	}
1376 
1377 	/* This intentionally wraps around if no bit is set */
1378 	*frag = min((unsigned)ffs(start) - 1, (unsigned)fls64(end - start) - 1);
1379 	if (*frag >= max_frag) {
1380 		*frag = max_frag;
1381 		*frag_end = end & ~((1ULL << max_frag) - 1);
1382 	} else {
1383 		*frag_end = start + (1 << *frag);
1384 	}
1385 }
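
/*
 * Worked example (illustration only, in GPU page numbers, assuming a
 * pre-Vega10 max_frag of 9 and no pages_addr): for start = 0x400 and
 * end = 0x1000, ffs(start) - 1 = 10 and fls64(end - start) - 1 = 11, so the
 * tentative fragment is 10.  That is clamped to max_frag = 9, frag_end
 * becomes 0x1000, and the whole range is written with AMDGPU_PTE_FRAG(9),
 * i.e. 2 MiB fragments.
 */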
1386 
1387 /**
1388  * amdgpu_vm_update_ptes - make sure that page tables are valid
1389  *
1390  * @params: see amdgpu_vm_update_params definition
1391  * @start: start of GPU address range
1392  * @end: end of GPU address range
1393  * @dst: destination address to map to, the next dst inside the function
1394  * @flags: mapping flags
1395  *
1396  * Update the page tables in the range @start - @end.
1397  *
1398  * Returns:
1399  * 0 for success, -EINVAL for failure.
1400  */
1401 static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
1402 				 uint64_t start, uint64_t end,
1403 				 uint64_t dst, uint64_t flags)
1404 {
1405 	struct amdgpu_device *adev = params->adev;
1406 	struct amdgpu_vm_pt_cursor cursor;
1407 	uint64_t frag_start = start, frag_end;
1408 	unsigned int frag;
1409 	int r;
1410 
1411 	/* figure out the initial fragment */
1412 	amdgpu_vm_fragment(params, frag_start, end, flags, &frag, &frag_end);
1413 
1414 	/* walk over the address space and update the PTs */
1415 	amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
1416 	while (cursor.pfn < end) {
1417 		unsigned shift, parent_shift, mask;
1418 		uint64_t incr, entry_end, pe_start;
1419 		struct amdgpu_bo *pt;
1420 
1421 		r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor,
1422 					params->direct);
1423 		if (r)
1424 			return r;
1425 
1426 		pt = cursor.entry->base.bo;
1427 
1428 		/* The root level can't be a huge page */
1429 		if (cursor.level == adev->vm_manager.root_level) {
1430 			if (!amdgpu_vm_pt_descendant(adev, &cursor))
1431 				return -ENOENT;
1432 			continue;
1433 		}
1434 
1435 		shift = amdgpu_vm_level_shift(adev, cursor.level);
1436 		parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1);
1437 		if (adev->asic_type < CHIP_VEGA10 &&
1438 		    (flags & AMDGPU_PTE_VALID)) {
1439 			/* No huge page support before GMC v9 */
1440 			if (cursor.level != AMDGPU_VM_PTB) {
1441 				if (!amdgpu_vm_pt_descendant(adev, &cursor))
1442 					return -ENOENT;
1443 				continue;
1444 			}
1445 		} else if (frag < shift) {
1446 			/* We can't use this level when the fragment size is
1447 			 * smaller than the address shift. Go to the next
1448 			 * child entry and try again.
1449 			 */
1450 			if (!amdgpu_vm_pt_descendant(adev, &cursor))
1451 				return -ENOENT;
1452 			continue;
1453 		} else if (frag >= parent_shift &&
1454 			   cursor.level - 1 != adev->vm_manager.root_level) {
1455 			/* If the fragment size is even larger than the parent
1456 			 * shift we should go up one level and check it again
1457 			 * unless one level up is the root level.
1458 			 */
1459 			if (!amdgpu_vm_pt_ancestor(&cursor))
1460 				return -ENOENT;
1461 			continue;
1462 		}
1463 
1464 		/* Looks good so far, calculate parameters for the update */
1465 		incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
1466 		mask = amdgpu_vm_entries_mask(adev, cursor.level);
1467 		pe_start = ((cursor.pfn >> shift) & mask) * 8;
1468 		entry_end = (uint64_t)(mask + 1) << shift;
1469 		entry_end += cursor.pfn & ~(entry_end - 1);
1470 		entry_end = min(entry_end, end);
1471 
1472 		do {
1473 			uint64_t upd_end = min(entry_end, frag_end);
1474 			unsigned nptes = (upd_end - frag_start) >> shift;
1475 
1476 			amdgpu_vm_update_flags(params, pt, cursor.level,
1477 					       pe_start, dst, nptes, incr,
1478 					       flags | AMDGPU_PTE_FRAG(frag));
1479 
1480 			pe_start += nptes * 8;
1481 			dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;
1482 
1483 			frag_start = upd_end;
1484 			if (frag_start >= frag_end) {
1485 				/* figure out the next fragment */
1486 				amdgpu_vm_fragment(params, frag_start, end,
1487 						   flags, &frag, &frag_end);
1488 				if (frag < shift)
1489 					break;
1490 			}
1491 		} while (frag_start < entry_end);
1492 
1493 		if (amdgpu_vm_pt_descendant(adev, &cursor)) {
1494 			/* Free all child entries */
1495 			while (cursor.pfn < frag_start) {
1496 				amdgpu_vm_free_pts(adev, params->vm, &cursor);
1497 				amdgpu_vm_pt_next(adev, &cursor);
1498 			}
1499 
1500 		} else if (frag >= shift) {
1501 			/* or just move on to the next on the same level. */
1502 			amdgpu_vm_pt_next(adev, &cursor);
1503 		}
1504 	}
1505 
1506 	return 0;
1507 }
1508 
1509 /**
1510  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1511  *
1512  * @adev: amdgpu_device pointer
1513  * @vm: requested vm
1514  * @direct: direct submission in a page fault
1515  * @exclusive: fence we need to sync to
1516  * @start: start of mapped range
1517  * @last: last mapped entry
1518  * @flags: flags for the entries
1519  * @addr: addr to set the area to
1520  * @pages_addr: DMA addresses to use for mapping
1521  * @fence: optional resulting fence
1522  *
1523  * Fill in the page table entries between @start and @last.
1524  *
1525  * Returns:
1526  * 0 for success, -EINVAL for failure.
1527  */
1528 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1529 				       struct amdgpu_vm *vm, bool direct,
1530 				       struct dma_fence *exclusive,
1531 				       uint64_t start, uint64_t last,
1532 				       uint64_t flags, uint64_t addr,
1533 				       dma_addr_t *pages_addr,
1534 				       struct dma_fence **fence)
1535 {
1536 	struct amdgpu_vm_update_params params;
1537 	void *owner = AMDGPU_FENCE_OWNER_VM;
1538 	int r;
1539 
1540 	memset(&params, 0, sizeof(params));
1541 	params.adev = adev;
1542 	params.vm = vm;
1543 	params.direct = direct;
1544 	params.pages_addr = pages_addr;
1545 
1546 	/* sync to everything except eviction fences on unmapping */
1547 	if (!(flags & AMDGPU_PTE_VALID))
1548 		owner = AMDGPU_FENCE_OWNER_KFD;
1549 
1550 	r = vm->update_funcs->prepare(&params, owner, exclusive);
1551 	if (r)
1552 		return r;
1553 
1554 	r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
1555 	if (r)
1556 		return r;
1557 
1558 	return vm->update_funcs->commit(&params, fence);
1559 }
1560 
1561 /**
1562  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1563  *
1564  * @adev: amdgpu_device pointer
1565  * @exclusive: fence we need to sync to
1566  * @pages_addr: DMA addresses to use for mapping
1567  * @vm: requested vm
1568  * @mapping: mapped range and flags to use for the update
1569  * @flags: HW flags for the mapping
 * @bo_adev: amdgpu_device pointer of the device where the BO was actually allocated
1571  * @nodes: array of drm_mm_nodes with the MC addresses
1572  * @fence: optional resulting fence
1573  *
1574  * Split the mapping into smaller chunks so that each update fits
1575  * into a SDMA IB.
1576  *
1577  * Returns:
1578  * 0 for success, -EINVAL for failure.
1579  */
1580 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1581 				      struct dma_fence *exclusive,
1582 				      dma_addr_t *pages_addr,
1583 				      struct amdgpu_vm *vm,
1584 				      struct amdgpu_bo_va_mapping *mapping,
1585 				      uint64_t flags,
1586 				      struct amdgpu_device *bo_adev,
1587 				      struct drm_mm_node *nodes,
1588 				      struct dma_fence **fence)
1589 {
1590 	unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1591 	uint64_t pfn, start = mapping->start;
1592 	int r;
1593 
	/* Normally bo_va->flags only contains the READABLE and WRITEABLE
	 * bits, but just in case we filter the flags here first.
	 */
1597 	if (!(mapping->flags & AMDGPU_PTE_READABLE))
1598 		flags &= ~AMDGPU_PTE_READABLE;
1599 	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1600 		flags &= ~AMDGPU_PTE_WRITEABLE;
1601 
1602 	/* Apply ASIC specific mapping flags */
1603 	amdgpu_gmc_get_vm_pte(adev, mapping, &flags);
1604 
1605 	trace_amdgpu_vm_bo_update(mapping);
1606 
1607 	pfn = mapping->offset >> PAGE_SHIFT;
1608 	if (nodes) {
1609 		while (pfn >= nodes->size) {
1610 			pfn -= nodes->size;
1611 			++nodes;
1612 		}
1613 	}
1614 
1615 	do {
1616 		dma_addr_t *dma_addr = NULL;
1617 		uint64_t max_entries;
1618 		uint64_t addr, last;
1619 
1620 		if (nodes) {
1621 			addr = nodes->start << PAGE_SHIFT;
1622 			max_entries = (nodes->size - pfn) *
1623 				AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1624 		} else {
1625 			addr = 0;
1626 			max_entries = S64_MAX;
1627 		}
1628 
1629 		if (pages_addr) {
1630 			uint64_t count;
1631 
1632 			for (count = 1;
1633 			     count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1634 			     ++count) {
1635 				uint64_t idx = pfn + count;
1636 
1637 				if (pages_addr[idx] !=
1638 				    (pages_addr[idx - 1] + PAGE_SIZE))
1639 					break;
1640 			}
1641 
1642 			if (count < min_linear_pages) {
1643 				addr = pfn << PAGE_SHIFT;
1644 				dma_addr = pages_addr;
1645 			} else {
1646 				addr = pages_addr[pfn];
1647 				max_entries = count *
1648 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1649 			}
1650 
1651 		} else if (flags & AMDGPU_PTE_VALID) {
1652 			addr += bo_adev->vm_manager.vram_base_offset;
1653 			addr += pfn << PAGE_SHIFT;
1654 		}
1655 
1656 		last = min((uint64_t)mapping->last, start + max_entries - 1);
1657 		r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive,
1658 						start, last, flags, addr,
1659 						dma_addr, fence);
1660 		if (r)
1661 			return r;
1662 
1663 		pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1664 		if (nodes && nodes->size == pfn) {
1665 			pfn = 0;
1666 			++nodes;
1667 		}
1668 		start = last + 1;
1669 
1670 	} while (unlikely(start != mapping->last + 1));
1671 
1672 	return 0;
1673 }
1674 
1675 /**
1676  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1677  *
1678  * @adev: amdgpu_device pointer
1679  * @bo_va: requested BO and VM object
1680  * @clear: if true clear the entries
1681  *
1682  * Fill in the page table entries for @bo_va.
1683  *
1684  * Returns:
1685  * 0 for success, -EINVAL for failure.
1686  */
1687 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
1688 			bool clear)
1689 {
1690 	struct amdgpu_bo *bo = bo_va->base.bo;
1691 	struct amdgpu_vm *vm = bo_va->base.vm;
1692 	struct amdgpu_bo_va_mapping *mapping;
1693 	dma_addr_t *pages_addr = NULL;
1694 	struct ttm_mem_reg *mem;
1695 	struct drm_mm_node *nodes;
1696 	struct dma_fence *exclusive, **last_update;
1697 	uint64_t flags;
1698 	struct amdgpu_device *bo_adev = adev;
1699 	int r;
1700 
1701 	if (clear || !bo) {
1702 		mem = NULL;
1703 		nodes = NULL;
1704 		exclusive = NULL;
1705 	} else {
1706 		struct ttm_dma_tt *ttm;
1707 
1708 		mem = &bo->tbo.mem;
1709 		nodes = mem->mm_node;
1710 		if (mem->mem_type == TTM_PL_TT) {
1711 			ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
1712 			pages_addr = ttm->dma_address;
1713 		}
1714 		exclusive = bo->tbo.moving;
1715 	}
1716 
1717 	if (bo) {
1718 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1719 		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1720 	} else {
1721 		flags = 0x0;
1722 	}
1723 
1724 	if (clear || (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv))
1725 		last_update = &vm->last_update;
1726 	else
1727 		last_update = &bo_va->last_pt_update;
1728 
1729 	if (!clear && bo_va->base.moved) {
1730 		bo_va->base.moved = false;
1731 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1732 
1733 	} else if (bo_va->cleared != clear) {
1734 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1735 	}
1736 
1737 	list_for_each_entry(mapping, &bo_va->invalids, list) {
1738 		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
1739 					       mapping, flags, bo_adev, nodes,
1740 					       last_update);
1741 		if (r)
1742 			return r;
1743 	}
1744 
1745 	/* If the BO is not in its preferred location add it back to
1746 	 * the evicted list so that it gets validated again on the
1747 	 * next command submission.
1748 	 */
1749 	if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
1750 		uint32_t mem_type = bo->tbo.mem.mem_type;
1751 
1752 		if (!(bo->preferred_domains &
1753 		      amdgpu_mem_type_to_domain(mem_type)))
1754 			amdgpu_vm_bo_evicted(&bo_va->base);
1755 		else
1756 			amdgpu_vm_bo_idle(&bo_va->base);
1757 	} else {
1758 		amdgpu_vm_bo_done(&bo_va->base);
1759 	}
1760 
1761 	list_splice_init(&bo_va->invalids, &bo_va->valids);
1762 	bo_va->cleared = clear;
1763 
1764 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1765 		list_for_each_entry(mapping, &bo_va->valids, list)
1766 			trace_amdgpu_vm_bo_mapping(mapping);
1767 	}
1768 
1769 	return 0;
1770 }
1771 
1772 /**
1773  * amdgpu_vm_update_prt_state - update the global PRT state
1774  *
1775  * @adev: amdgpu_device pointer
1776  */
1777 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1778 {
1779 	unsigned long flags;
1780 	bool enable;
1781 
1782 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1783 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1784 	adev->gmc.gmc_funcs->set_prt(adev, enable);
1785 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1786 }
1787 
1788 /**
1789  * amdgpu_vm_prt_get - add a PRT user
1790  *
1791  * @adev: amdgpu_device pointer
1792  */
1793 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1794 {
1795 	if (!adev->gmc.gmc_funcs->set_prt)
1796 		return;
1797 
1798 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1799 		amdgpu_vm_update_prt_state(adev);
1800 }
1801 
1802 /**
1803  * amdgpu_vm_prt_put - drop a PRT user
1804  *
1805  * @adev: amdgpu_device pointer
1806  */
1807 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1808 {
1809 	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1810 		amdgpu_vm_update_prt_state(adev);
1811 }
1812 
1813 /**
1814  * amdgpu_vm_prt_cb - callback for updating the PRT status
1815  *
1816  * @fence: fence for the callback
1817  * @_cb: the callback function
1818  */
1819 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1820 {
1821 	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1822 
1823 	amdgpu_vm_prt_put(cb->adev);
1824 	kfree(cb);
1825 }
1826 
1827 /**
1828  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1829  *
1830  * @adev: amdgpu_device pointer
1831  * @fence: fence for the callback
1832  */
1833 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1834 				 struct dma_fence *fence)
1835 {
1836 	struct amdgpu_prt_cb *cb;
1837 
1838 	if (!adev->gmc.gmc_funcs->set_prt)
1839 		return;
1840 
1841 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1842 	if (!cb) {
1843 		/* Last resort when we are OOM */
1844 		if (fence)
1845 			dma_fence_wait(fence, false);
1846 
1847 		amdgpu_vm_prt_put(adev);
1848 	} else {
1849 		cb->adev = adev;
1850 		if (!fence || dma_fence_add_callback(fence, &cb->cb,
1851 						     amdgpu_vm_prt_cb))
1852 			amdgpu_vm_prt_cb(fence, &cb->cb);
1853 	}
1854 }
1855 
1856 /**
1857  * amdgpu_vm_free_mapping - free a mapping
1858  *
1859  * @adev: amdgpu_device pointer
1860  * @vm: requested vm
1861  * @mapping: mapping to be freed
1862  * @fence: fence of the unmap operation
1863  *
1864  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1865  */
1866 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1867 				   struct amdgpu_vm *vm,
1868 				   struct amdgpu_bo_va_mapping *mapping,
1869 				   struct dma_fence *fence)
1870 {
1871 	if (mapping->flags & AMDGPU_PTE_PRT)
1872 		amdgpu_vm_add_prt_cb(adev, fence);
1873 	kfree(mapping);
1874 }
1875 
1876 /**
1877  * amdgpu_vm_prt_fini - finish all prt mappings
1878  *
1879  * @adev: amdgpu_device pointer
1880  * @vm: requested vm
1881  *
1882  * Register a cleanup callback to disable PRT support after VM dies.
1883  */
1884 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1885 {
1886 	struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
1887 	struct dma_fence *excl, **shared;
1888 	unsigned i, shared_count;
1889 	int r;
1890 
	r = dma_resv_get_fences_rcu(resv, &excl, &shared_count, &shared);
1893 	if (r) {
		/* Not enough memory to grab the fence list; as a last resort
		 * block for all the fences to complete.
		 */
		dma_resv_wait_timeout_rcu(resv, true, false,
					  MAX_SCHEDULE_TIMEOUT);
1899 		return;
1900 	}
1901 
1902 	/* Add a callback for each fence in the reservation object */
1903 	amdgpu_vm_prt_get(adev);
1904 	amdgpu_vm_add_prt_cb(adev, excl);
1905 
1906 	for (i = 0; i < shared_count; ++i) {
1907 		amdgpu_vm_prt_get(adev);
1908 		amdgpu_vm_add_prt_cb(adev, shared[i]);
1909 	}
1910 
1911 	kfree(shared);
1912 }
1913 
1914 /**
1915  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1916  *
1917  * @adev: amdgpu_device pointer
1918  * @vm: requested vm
1919  * @fence: optional resulting fence (unchanged if no work needed to be done
1920  * or if an error occurred)
1921  *
1922  * Make sure all freed BOs are cleared in the PT.
1923  * PTs have to be reserved and mutex must be locked!
1924  *
1925  * Returns:
1926  * 0 for success.
1927  *
1928  */
1929 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1930 			  struct amdgpu_vm *vm,
1931 			  struct dma_fence **fence)
1932 {
1933 	struct amdgpu_bo_va_mapping *mapping;
1934 	uint64_t init_pte_value = 0;
1935 	struct dma_fence *f = NULL;
1936 	int r;
1937 
1938 	while (!list_empty(&vm->freed)) {
1939 		mapping = list_first_entry(&vm->freed,
1940 			struct amdgpu_bo_va_mapping, list);
1941 		list_del(&mapping->list);
1942 
1943 		if (vm->pte_support_ats &&
1944 		    mapping->start < AMDGPU_GMC_HOLE_START)
1945 			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
1946 
1947 		r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL,
1948 						mapping->start, mapping->last,
1949 						init_pte_value, 0, NULL, &f);
1950 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1951 		if (r) {
1952 			dma_fence_put(f);
1953 			return r;
1954 		}
1955 	}
1956 
1957 	if (fence && f) {
1958 		dma_fence_put(*fence);
1959 		*fence = f;
1960 	} else {
1961 		dma_fence_put(f);
1962 	}
1963 
	return 0;
}
1967 
1968 /**
1969  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1970  *
1971  * @adev: amdgpu_device pointer
1972  * @vm: requested vm
1973  *
1974  * Make sure all BOs which are moved are updated in the PTs.
1975  *
1976  * Returns:
1977  * 0 for success.
1978  *
1979  * PTs have to be reserved!
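 *
 * Illustrative sketch of a command submission style flow (assuming the
 * relevant BOs and page tables are already reserved):
 *
 *	r = amdgpu_vm_clear_freed(adev, vm, NULL);
 *	if (!r)
 *		r = amdgpu_vm_handle_moved(adev, vm);
 *	if (!r)
 *		r = amdgpu_vm_update_pdes(adev, vm, false);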
1980  */
1981 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1982 			   struct amdgpu_vm *vm)
1983 {
1984 	struct amdgpu_bo_va *bo_va, *tmp;
1985 	struct dma_resv *resv;
1986 	bool clear;
1987 	int r;
1988 
1989 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
		/* Per VM BOs never need to be cleared in the page tables */
1991 		r = amdgpu_vm_bo_update(adev, bo_va, false);
1992 		if (r)
1993 			return r;
1994 	}
1995 
1996 	spin_lock(&vm->invalidated_lock);
1997 	while (!list_empty(&vm->invalidated)) {
1998 		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1999 					 base.vm_status);
2000 		resv = bo_va->base.bo->tbo.base.resv;
2001 		spin_unlock(&vm->invalidated_lock);
2002 
2003 		/* Try to reserve the BO to avoid clearing its ptes */
2004 		if (!amdgpu_vm_debug && dma_resv_trylock(resv))
2005 			clear = false;
2006 		/* Somebody else is using the BO right now */
2007 		else
2008 			clear = true;
2009 
2010 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
2011 		if (r)
2012 			return r;
2013 
2014 		if (!clear)
2015 			dma_resv_unlock(resv);
2016 		spin_lock(&vm->invalidated_lock);
2017 	}
2018 	spin_unlock(&vm->invalidated_lock);
2019 
2020 	return 0;
2021 }
2022 
2023 /**
2024  * amdgpu_vm_bo_add - add a bo to a specific vm
2025  *
2026  * @adev: amdgpu_device pointer
2027  * @vm: requested vm
2028  * @bo: amdgpu buffer object
2029  *
 * Add @bo into the requested vm and to the list of BOs associated with
 * the vm.
2032  *
2033  * Returns:
2034  * Newly added bo_va or NULL for failure
2035  *
2036  * Object has to be reserved!
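 *
 * Illustrative sketch (error handling elided, @va is the chosen GPU virtual
 * address and the PTE flags are just an example):
 *
 *	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *	if (!bo_va)
 *		return -ENOMEM;
 *	r = amdgpu_vm_bo_map(adev, bo_va, va, 0, amdgpu_bo_size(bo),
 *			     AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE |
 *			     AMDGPU_PTE_WRITEABLE);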
2037  */
2038 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
2039 				      struct amdgpu_vm *vm,
2040 				      struct amdgpu_bo *bo)
2041 {
2042 	struct amdgpu_bo_va *bo_va;
2043 
2044 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
2045 	if (bo_va == NULL) {
2046 		return NULL;
2047 	}
2048 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
2049 
2050 	bo_va->ref_count = 1;
2051 	INIT_LIST_HEAD(&bo_va->valids);
2052 	INIT_LIST_HEAD(&bo_va->invalids);
2053 
2054 	if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
2055 	    (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
2056 		bo_va->is_xgmi = true;
2057 		mutex_lock(&adev->vm_manager.lock_pstate);
2058 		/* Power up XGMI if it can be potentially used */
2059 		if (++adev->vm_manager.xgmi_map_counter == 1)
2060 			amdgpu_xgmi_set_pstate(adev, 1);
2061 		mutex_unlock(&adev->vm_manager.lock_pstate);
2062 	}
2063 
2064 	return bo_va;
2065 }
2066 
2067 
2068 /**
 * amdgpu_vm_bo_insert_map - insert a new mapping
2070  *
2071  * @adev: amdgpu_device pointer
2072  * @bo_va: bo_va to store the address
2073  * @mapping: the mapping to insert
2074  *
2075  * Insert a new mapping into all structures.
2076  */
2077 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
2078 				    struct amdgpu_bo_va *bo_va,
2079 				    struct amdgpu_bo_va_mapping *mapping)
2080 {
2081 	struct amdgpu_vm *vm = bo_va->base.vm;
2082 	struct amdgpu_bo *bo = bo_va->base.bo;
2083 
2084 	mapping->bo_va = bo_va;
2085 	list_add(&mapping->list, &bo_va->invalids);
2086 	amdgpu_vm_it_insert(mapping, &vm->va);
2087 
2088 	if (mapping->flags & AMDGPU_PTE_PRT)
2089 		amdgpu_vm_prt_get(adev);
2090 
2091 	if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv &&
2092 	    !bo_va->base.moved) {
2093 		list_move(&bo_va->base.vm_status, &vm->moved);
2094 	}
2095 	trace_amdgpu_vm_bo_map(bo_va, mapping);
2096 }
2097 
2098 /**
2099  * amdgpu_vm_bo_map - map bo inside a vm
2100  *
2101  * @adev: amdgpu_device pointer
2102  * @bo_va: bo_va to store the address
2103  * @saddr: where to map the BO
2104  * @offset: requested offset in the BO
2105  * @size: BO size in bytes
2106  * @flags: attributes of pages (read/write/valid/etc.)
2107  *
 * Add a mapping of the BO at the specified addr into the VM.
2109  *
2110  * Returns:
2111  * 0 for success, error for failure.
2112  *
2113  * Object has to be reserved and unreserved outside!
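 *
 * @saddr, @offset and @size must all be aligned to AMDGPU_GPU_PAGE_SIZE.
 * Illustrative sketch mapping the first 1 MiB of the BO at VA 1 GiB (the
 * PTE flags are just an example):
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, 1ULL << 30, 0, 1 << 20,
 *			     AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE);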
2114  */
2115 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2116 		     struct amdgpu_bo_va *bo_va,
2117 		     uint64_t saddr, uint64_t offset,
2118 		     uint64_t size, uint64_t flags)
2119 {
2120 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2121 	struct amdgpu_bo *bo = bo_va->base.bo;
2122 	struct amdgpu_vm *vm = bo_va->base.vm;
2123 	uint64_t eaddr;
2124 
2125 	/* validate the parameters */
2126 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2127 	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2128 		return -EINVAL;
2129 
2130 	/* make sure object fit at this offset */
2131 	eaddr = saddr + size - 1;
2132 	if (saddr >= eaddr ||
2133 	    (bo && offset + size > amdgpu_bo_size(bo)))
2134 		return -EINVAL;
2135 
2136 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2137 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2138 
2139 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2140 	if (tmp) {
2141 		/* bo and tmp overlap, invalid addr */
2142 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2143 			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
2144 			tmp->start, tmp->last + 1);
2145 		return -EINVAL;
2146 	}
2147 
2148 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2149 	if (!mapping)
2150 		return -ENOMEM;
2151 
2152 	mapping->start = saddr;
2153 	mapping->last = eaddr;
2154 	mapping->offset = offset;
2155 	mapping->flags = flags;
2156 
2157 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2158 
2159 	return 0;
2160 }
2161 
2162 /**
2163  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2164  *
2165  * @adev: amdgpu_device pointer
2166  * @bo_va: bo_va to store the address
2167  * @saddr: where to map the BO
2168  * @offset: requested offset in the BO
2169  * @size: BO size in bytes
2170  * @flags: attributes of pages (read/write/valid/etc.)
2171  *
 * Add a mapping of the BO at the specified addr into the VM. Replace existing
2173  * mappings as we do so.
2174  *
2175  * Returns:
2176  * 0 for success, error for failure.
2177  *
2178  * Object has to be reserved and unreserved outside!
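 *
 * Illustrative sketch: map the BO at a new address, discarding whatever
 * mappings previously covered that range (the PTE flags are just an
 * example):
 *
 *	r = amdgpu_vm_bo_replace_map(adev, bo_va, new_va, 0,
 *				     amdgpu_bo_size(bo_va->base.bo),
 *				     AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE);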
2179  */
2180 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2181 			     struct amdgpu_bo_va *bo_va,
2182 			     uint64_t saddr, uint64_t offset,
2183 			     uint64_t size, uint64_t flags)
2184 {
2185 	struct amdgpu_bo_va_mapping *mapping;
2186 	struct amdgpu_bo *bo = bo_va->base.bo;
2187 	uint64_t eaddr;
2188 	int r;
2189 
2190 	/* validate the parameters */
2191 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2192 	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2193 		return -EINVAL;
2194 
2195 	/* make sure object fit at this offset */
2196 	eaddr = saddr + size - 1;
2197 	if (saddr >= eaddr ||
2198 	    (bo && offset + size > amdgpu_bo_size(bo)))
2199 		return -EINVAL;
2200 
2201 	/* Allocate all the needed memory */
2202 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2203 	if (!mapping)
2204 		return -ENOMEM;
2205 
2206 	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2207 	if (r) {
2208 		kfree(mapping);
2209 		return r;
2210 	}
2211 
2212 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2213 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2214 
2215 	mapping->start = saddr;
2216 	mapping->last = eaddr;
2217 	mapping->offset = offset;
2218 	mapping->flags = flags;
2219 
2220 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2221 
2222 	return 0;
2223 }
2224 
2225 /**
2226  * amdgpu_vm_bo_unmap - remove bo mapping from vm
2227  *
2228  * @adev: amdgpu_device pointer
2229  * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
2231  *
 * Remove a mapping of the BO at the specified addr from the VM.
2233  *
2234  * Returns:
2235  * 0 for success, error for failure.
2236  *
2237  * Object has to be reserved and unreserved outside!
2238  */
2239 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2240 		       struct amdgpu_bo_va *bo_va,
2241 		       uint64_t saddr)
2242 {
2243 	struct amdgpu_bo_va_mapping *mapping;
2244 	struct amdgpu_vm *vm = bo_va->base.vm;
2245 	bool valid = true;
2246 
2247 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2248 
2249 	list_for_each_entry(mapping, &bo_va->valids, list) {
2250 		if (mapping->start == saddr)
2251 			break;
2252 	}
2253 
2254 	if (&mapping->list == &bo_va->valids) {
2255 		valid = false;
2256 
2257 		list_for_each_entry(mapping, &bo_va->invalids, list) {
2258 			if (mapping->start == saddr)
2259 				break;
2260 		}
2261 
2262 		if (&mapping->list == &bo_va->invalids)
2263 			return -ENOENT;
2264 	}
2265 
2266 	list_del(&mapping->list);
2267 	amdgpu_vm_it_remove(mapping, &vm->va);
2268 	mapping->bo_va = NULL;
2269 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2270 
2271 	if (valid)
2272 		list_add(&mapping->list, &vm->freed);
2273 	else
2274 		amdgpu_vm_free_mapping(adev, vm, mapping,
2275 				       bo_va->last_pt_update);
2276 
2277 	return 0;
2278 }
2279 
2280 /**
2281  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2282  *
2283  * @adev: amdgpu_device pointer
2284  * @vm: VM structure to use
2285  * @saddr: start of the range
2286  * @size: size of the range
2287  *
2288  * Remove all mappings in a range, split them as appropriate.
2289  *
2290  * Returns:
2291  * 0 for success, error for failure.
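 *
 * Example: with an existing mapping covering GPU pages [0x0000, 0x3fff],
 * clearing the range corresponding to pages [0x1000, 0x2fff] leaves two
 * partial mappings behind, "before" = [0x0000, 0x0fff] and
 * "after" = [0x3000, 0x3fff], both still belonging to the original bo_va.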
2292  */
2293 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2294 				struct amdgpu_vm *vm,
2295 				uint64_t saddr, uint64_t size)
2296 {
2297 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2298 	LIST_HEAD(removed);
2299 	uint64_t eaddr;
2300 
2301 	eaddr = saddr + size - 1;
2302 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2303 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2304 
2305 	/* Allocate all the needed memory */
2306 	before = kzalloc(sizeof(*before), GFP_KERNEL);
2307 	if (!before)
2308 		return -ENOMEM;
2309 	INIT_LIST_HEAD(&before->list);
2310 
2311 	after = kzalloc(sizeof(*after), GFP_KERNEL);
2312 	if (!after) {
2313 		kfree(before);
2314 		return -ENOMEM;
2315 	}
2316 	INIT_LIST_HEAD(&after->list);
2317 
2318 	/* Now gather all removed mappings */
2319 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2320 	while (tmp) {
2321 		/* Remember mapping split at the start */
2322 		if (tmp->start < saddr) {
2323 			before->start = tmp->start;
2324 			before->last = saddr - 1;
2325 			before->offset = tmp->offset;
2326 			before->flags = tmp->flags;
2327 			before->bo_va = tmp->bo_va;
2328 			list_add(&before->list, &tmp->bo_va->invalids);
2329 		}
2330 
2331 		/* Remember mapping split at the end */
2332 		if (tmp->last > eaddr) {
2333 			after->start = eaddr + 1;
2334 			after->last = tmp->last;
2335 			after->offset = tmp->offset;
2336 			after->offset += after->start - tmp->start;
2337 			after->flags = tmp->flags;
2338 			after->bo_va = tmp->bo_va;
2339 			list_add(&after->list, &tmp->bo_va->invalids);
2340 		}
2341 
2342 		list_del(&tmp->list);
2343 		list_add(&tmp->list, &removed);
2344 
2345 		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2346 	}
2347 
2348 	/* And free them up */
2349 	list_for_each_entry_safe(tmp, next, &removed, list) {
2350 		amdgpu_vm_it_remove(tmp, &vm->va);
2351 		list_del(&tmp->list);
2352 
		if (tmp->start < saddr)
			tmp->start = saddr;
		if (tmp->last > eaddr)
			tmp->last = eaddr;
2357 
2358 		tmp->bo_va = NULL;
2359 		list_add(&tmp->list, &vm->freed);
2360 		trace_amdgpu_vm_bo_unmap(NULL, tmp);
2361 	}
2362 
2363 	/* Insert partial mapping before the range */
2364 	if (!list_empty(&before->list)) {
2365 		amdgpu_vm_it_insert(before, &vm->va);
2366 		if (before->flags & AMDGPU_PTE_PRT)
2367 			amdgpu_vm_prt_get(adev);
2368 	} else {
2369 		kfree(before);
2370 	}
2371 
2372 	/* Insert partial mapping after the range */
2373 	if (!list_empty(&after->list)) {
2374 		amdgpu_vm_it_insert(after, &vm->va);
2375 		if (after->flags & AMDGPU_PTE_PRT)
2376 			amdgpu_vm_prt_get(adev);
2377 	} else {
2378 		kfree(after);
2379 	}
2380 
2381 	return 0;
2382 }
2383 
2384 /**
2385  * amdgpu_vm_bo_lookup_mapping - find mapping by address
2386  *
2387  * @vm: the requested VM
2388  * @addr: the address
2389  *
 * Find a mapping by its address.
2391  *
2392  * Returns:
2393  * The amdgpu_bo_va_mapping matching for addr or NULL
2394  *
2395  */
2396 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2397 							 uint64_t addr)
2398 {
2399 	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2400 }
2401 
2402 /**
2403  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2404  *
2405  * @vm: the requested vm
2406  * @ticket: CS ticket
2407  *
2408  * Trace all mappings of BOs reserved during a command submission.
2409  */
2410 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2411 {
2412 	struct amdgpu_bo_va_mapping *mapping;
2413 
2414 	if (!trace_amdgpu_vm_bo_cs_enabled())
2415 		return;
2416 
2417 	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2418 	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2419 		if (mapping->bo_va && mapping->bo_va->base.bo) {
2420 			struct amdgpu_bo *bo;
2421 
2422 			bo = mapping->bo_va->base.bo;
2423 			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2424 			    ticket)
2425 				continue;
2426 		}
2427 
2428 		trace_amdgpu_vm_bo_cs(mapping);
2429 	}
2430 }
2431 
2432 /**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2434  *
2435  * @adev: amdgpu_device pointer
2436  * @bo_va: requested bo_va
2437  *
2438  * Remove @bo_va->bo from the requested vm.
2439  *
 * Object has to be reserved!
2441  */
2442 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2443 		      struct amdgpu_bo_va *bo_va)
2444 {
2445 	struct amdgpu_bo_va_mapping *mapping, *next;
2446 	struct amdgpu_bo *bo = bo_va->base.bo;
2447 	struct amdgpu_vm *vm = bo_va->base.vm;
2448 	struct amdgpu_vm_bo_base **base;
2449 
2450 	if (bo) {
2451 		if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
2452 			vm->bulk_moveable = false;
2453 
2454 		for (base = &bo_va->base.bo->vm_bo; *base;
2455 		     base = &(*base)->next) {
2456 			if (*base != &bo_va->base)
2457 				continue;
2458 
2459 			*base = bo_va->base.next;
2460 			break;
2461 		}
2462 	}
2463 
2464 	spin_lock(&vm->invalidated_lock);
2465 	list_del(&bo_va->base.vm_status);
2466 	spin_unlock(&vm->invalidated_lock);
2467 
2468 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2469 		list_del(&mapping->list);
2470 		amdgpu_vm_it_remove(mapping, &vm->va);
2471 		mapping->bo_va = NULL;
2472 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2473 		list_add(&mapping->list, &vm->freed);
2474 	}
2475 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2476 		list_del(&mapping->list);
2477 		amdgpu_vm_it_remove(mapping, &vm->va);
2478 		amdgpu_vm_free_mapping(adev, vm, mapping,
2479 				       bo_va->last_pt_update);
2480 	}
2481 
2482 	dma_fence_put(bo_va->last_pt_update);
2483 
2484 	if (bo && bo_va->is_xgmi) {
2485 		mutex_lock(&adev->vm_manager.lock_pstate);
2486 		if (--adev->vm_manager.xgmi_map_counter == 0)
2487 			amdgpu_xgmi_set_pstate(adev, 0);
2488 		mutex_unlock(&adev->vm_manager.lock_pstate);
2489 	}
2490 
2491 	kfree(bo_va);
2492 }
2493 
2494 /**
2495  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2496  *
2497  * @adev: amdgpu_device pointer
2498  * @bo: amdgpu buffer object
2499  * @evicted: is the BO evicted
2500  *
2501  * Mark @bo as invalid.
2502  */
2503 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2504 			     struct amdgpu_bo *bo, bool evicted)
2505 {
2506 	struct amdgpu_vm_bo_base *bo_base;
2507 
2508 	/* shadow bo doesn't have bo base, its validation needs its parent */
2509 	if (bo->parent && bo->parent->shadow == bo)
2510 		bo = bo->parent;
2511 
2512 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2513 		struct amdgpu_vm *vm = bo_base->vm;
2514 
2515 		if (evicted && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
2516 			amdgpu_vm_bo_evicted(bo_base);
2517 			continue;
2518 		}
2519 
2520 		if (bo_base->moved)
2521 			continue;
2522 		bo_base->moved = true;
2523 
2524 		if (bo->tbo.type == ttm_bo_type_kernel)
2525 			amdgpu_vm_bo_relocated(bo_base);
2526 		else if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
2527 			amdgpu_vm_bo_moved(bo_base);
2528 		else
2529 			amdgpu_vm_bo_invalidated(bo_base);
2530 	}
2531 }
2532 
2533 /**
2534  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2535  *
2536  * @vm_size: VM size
2537  *
2538  * Returns:
 * VM page table size as a power of two
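 *
 * Example: for @vm_size = 256 (GB), bits = ilog2(256) + 18 = 26, which is
 * split roughly equally, giving a block size of (26 + 3) / 2 = 14.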
2540  */
2541 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2542 {
2543 	/* Total bits covered by PD + PTs */
2544 	unsigned bits = ilog2(vm_size) + 18;
2545 
	/* Make sure the PD is 4K in size up to 8GB address space.
	 * Above that split equally between PD and PTs.
	 */
2548 	if (vm_size <= 8)
2549 		return (bits - 9);
2550 	else
2551 		return ((bits + 3) / 2);
2552 }
2553 
2554 /**
2555  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2556  *
2557  * @adev: amdgpu_device pointer
 * @min_vm_size: the minimum vm size in GB if the size is set to auto
2559  * @fragment_size_default: Default PTE fragment size
2560  * @max_level: max VMPT level
2561  * @max_bits: max address space size in bits
2562  *
2563  */
2564 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2565 			   uint32_t fragment_size_default, unsigned max_level,
2566 			   unsigned max_bits)
2567 {
2568 	unsigned int max_size = 1 << (max_bits - 30);
2569 	unsigned int vm_size;
2570 	uint64_t tmp;
2571 
2572 	/* adjust vm size first */
2573 	if (amdgpu_vm_size != -1) {
2574 		vm_size = amdgpu_vm_size;
2575 		if (vm_size > max_size) {
2576 			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2577 				 amdgpu_vm_size, max_size);
2578 			vm_size = max_size;
2579 		}
2580 	} else {
2581 		struct sysinfo si;
2582 		unsigned int phys_ram_gb;
2583 
2584 		/* Optimal VM size depends on the amount of physical
2585 		 * RAM available. Underlying requirements and
2586 		 * assumptions:
2587 		 *
2588 		 *  - Need to map system memory and VRAM from all GPUs
2589 		 *     - VRAM from other GPUs not known here
2590 		 *     - Assume VRAM <= system memory
2591 		 *  - On GFX8 and older, VM space can be segmented for
2592 		 *    different MTYPEs
2593 		 *  - Need to allow room for fragmentation, guard pages etc.
2594 		 *
2595 		 * This adds up to a rough guess of system memory x3.
2596 		 * Round up to power of two to maximize the available
2597 		 * VM size with the given page table size.
2598 		 */
2599 		si_meminfo(&si);
2600 		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2601 			       (1 << 30) - 1) >> 30;
2602 		vm_size = roundup_pow_of_two(
2603 			min(max(phys_ram_gb * 3, min_vm_size), max_size));
2604 	}
2605 
2606 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
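	/* e.g. a 256 GB VM covers 256 << 18 = 2^26 GPU pages of 4 KiB each */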
2607 
2608 	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2609 	if (amdgpu_vm_block_size != -1)
2610 		tmp >>= amdgpu_vm_block_size - 9;
2611 	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2612 	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2613 	switch (adev->vm_manager.num_level) {
2614 	case 3:
2615 		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2616 		break;
2617 	case 2:
2618 		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2619 		break;
2620 	case 1:
2621 		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2622 		break;
2623 	default:
2624 		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2625 	}
	/* block size depends on vm size and hw setup */
2627 	if (amdgpu_vm_block_size != -1)
2628 		adev->vm_manager.block_size =
2629 			min((unsigned)amdgpu_vm_block_size, max_bits
2630 			    - AMDGPU_GPU_PAGE_SHIFT
2631 			    - 9 * adev->vm_manager.num_level);
2632 	else if (adev->vm_manager.num_level > 1)
2633 		adev->vm_manager.block_size = 9;
2634 	else
2635 		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2636 
2637 	if (amdgpu_vm_fragment_size == -1)
2638 		adev->vm_manager.fragment_size = fragment_size_default;
2639 	else
2640 		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2641 
2642 	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2643 		 vm_size, adev->vm_manager.num_level + 1,
2644 		 adev->vm_manager.block_size,
2645 		 adev->vm_manager.fragment_size);
2646 }
2647 
2648 /**
2649  * amdgpu_vm_wait_idle - wait for the VM to become idle
2650  *
2651  * @vm: VM object to wait for
2652  * @timeout: timeout to wait for VM to become idle
2653  */
2654 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2655 {
	return dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
					 true, true, timeout);
2658 }
2659 
2660 /**
2661  * amdgpu_vm_init - initialize a vm instance
2662  *
2663  * @adev: amdgpu_device pointer
2664  * @vm: requested vm
 * @vm_context: Indicates if it is a GFX or Compute context
2666  * @pasid: Process address space identifier
2667  *
2668  * Init @vm fields.
2669  *
2670  * Returns:
2671  * 0 for success, error for failure.
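 *
 * Illustrative sketch:
 *
 *	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
 *	if (r)
 *		return r;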
2672  */
2673 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2674 		   int vm_context, unsigned int pasid)
2675 {
2676 	struct amdgpu_bo_param bp;
2677 	struct amdgpu_bo *root;
2678 	int r, i;
2679 
2680 	vm->va = RB_ROOT_CACHED;
2681 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2682 		vm->reserved_vmid[i] = NULL;
2683 	INIT_LIST_HEAD(&vm->evicted);
2684 	INIT_LIST_HEAD(&vm->relocated);
2685 	INIT_LIST_HEAD(&vm->moved);
2686 	INIT_LIST_HEAD(&vm->idle);
2687 	INIT_LIST_HEAD(&vm->invalidated);
2688 	spin_lock_init(&vm->invalidated_lock);
2689 	INIT_LIST_HEAD(&vm->freed);
2690 
2691 	/* create scheduler entities for page table updates */
2692 	r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs,
2693 				  adev->vm_manager.vm_pte_num_rqs, NULL);
2694 	if (r)
2695 		return r;
2696 
2697 	r = drm_sched_entity_init(&vm->delayed, adev->vm_manager.vm_pte_rqs,
2698 				  adev->vm_manager.vm_pte_num_rqs, NULL);
2699 	if (r)
2700 		goto error_free_direct;
2701 
2702 	vm->pte_support_ats = false;
2703 
2704 	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
2705 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2706 						AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2707 
2708 		if (adev->asic_type == CHIP_RAVEN)
2709 			vm->pte_support_ats = true;
2710 	} else {
2711 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2712 						AMDGPU_VM_USE_CPU_FOR_GFX);
2713 	}
2714 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2715 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2716 	WARN_ONCE((vm->use_cpu_for_update &&
2717 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2718 		  "CPU update of VM recommended only for large BAR system\n");
2719 
2720 	if (vm->use_cpu_for_update)
2721 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2722 	else
2723 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2724 	vm->last_update = NULL;
2725 
2726 	amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp);
2727 	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
2728 		bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
2729 	r = amdgpu_bo_create(adev, &bp, &root);
2730 	if (r)
2731 		goto error_free_delayed;
2732 
2733 	r = amdgpu_bo_reserve(root, true);
2734 	if (r)
2735 		goto error_free_root;
2736 
2737 	r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
2738 	if (r)
2739 		goto error_unreserve;
2740 
2741 	amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
2742 
2743 	r = amdgpu_vm_clear_bo(adev, vm, root, false);
2744 	if (r)
2745 		goto error_unreserve;
2746 
2747 	amdgpu_bo_unreserve(vm->root.base.bo);
2748 
2749 	if (pasid) {
2750 		unsigned long flags;
2751 
2752 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2753 		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2754 			      GFP_ATOMIC);
2755 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2756 		if (r < 0)
2757 			goto error_free_root;
2758 
2759 		vm->pasid = pasid;
2760 	}
2761 
2762 	INIT_KFIFO(vm->faults);
2763 
2764 	return 0;
2765 
2766 error_unreserve:
2767 	amdgpu_bo_unreserve(vm->root.base.bo);
2768 
2769 error_free_root:
2770 	amdgpu_bo_unref(&vm->root.base.bo->shadow);
2771 	amdgpu_bo_unref(&vm->root.base.bo);
2772 	vm->root.base.bo = NULL;
2773 
2774 error_free_delayed:
2775 	drm_sched_entity_destroy(&vm->delayed);
2776 
2777 error_free_direct:
2778 	drm_sched_entity_destroy(&vm->direct);
2779 
2780 	return r;
2781 }
2782 
2783 /**
2784  * amdgpu_vm_check_clean_reserved - check if a VM is clean
2785  *
2786  * @adev: amdgpu_device pointer
2787  * @vm: the VM to check
2788  *
 * Check all entries of the root PD. If any subsequent PDs are allocated,
 * page tables are being created and filled, so the VM is not clean.
2792  *
2793  * Returns:
2794  *	0 if this VM is clean
2795  */
2796 static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
2797 	struct amdgpu_vm *vm)
2798 {
2799 	enum amdgpu_vm_level root = adev->vm_manager.root_level;
2800 	unsigned int entries = amdgpu_vm_num_entries(adev, root);
2801 	unsigned int i = 0;
2802 
2803 	if (!(vm->root.entries))
2804 		return 0;
2805 
2806 	for (i = 0; i < entries; i++) {
2807 		if (vm->root.entries[i].base.bo)
2808 			return -EINVAL;
2809 	}
2810 
2811 	return 0;
2812 }
2813 
2814 /**
2815  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2816  *
2817  * @adev: amdgpu_device pointer
2818  * @vm: requested vm
2819  * @pasid: pasid to use
2820  *
2821  * This only works on GFX VMs that don't have any BOs added and no
2822  * page tables allocated yet.
2823  *
2824  * Changes the following VM parameters:
2825  * - use_cpu_for_update
 * - pte_support_ats
2827  * - pasid (old PASID is released, because compute manages its own PASIDs)
2828  *
2829  * Reinitializes the page directory to reflect the changed ATS
2830  * setting.
2831  *
2832  * Returns:
2833  * 0 for success, -errno for errors.
2834  */
2835 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2836 			   unsigned int pasid)
2837 {
2838 	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
2839 	int r;
2840 
2841 	r = amdgpu_bo_reserve(vm->root.base.bo, true);
2842 	if (r)
2843 		return r;
2844 
2845 	/* Sanity checks */
2846 	r = amdgpu_vm_check_clean_reserved(adev, vm);
2847 	if (r)
2848 		goto unreserve_bo;
2849 
2850 	if (pasid) {
2851 		unsigned long flags;
2852 
2853 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2854 		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2855 			      GFP_ATOMIC);
2856 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2857 
2858 		if (r == -ENOSPC)
2859 			goto unreserve_bo;
2860 		r = 0;
2861 	}
2862 
2863 	/* Check if PD needs to be reinitialized and do it before
2864 	 * changing any other state, in case it fails.
2865 	 */
2866 	if (pte_support_ats != vm->pte_support_ats) {
2867 		vm->pte_support_ats = pte_support_ats;
2868 		r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false);
2869 		if (r)
2870 			goto free_idr;
2871 	}
2872 
2873 	/* Update VM state */
2874 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2875 				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2876 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2877 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2878 	WARN_ONCE((vm->use_cpu_for_update &&
2879 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2880 		  "CPU update of VM recommended only for large BAR system\n");
2881 
2882 	if (vm->use_cpu_for_update)
2883 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2884 	else
2885 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2886 	dma_fence_put(vm->last_update);
2887 	vm->last_update = NULL;
2888 
2889 	if (vm->pasid) {
2890 		unsigned long flags;
2891 
2892 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2893 		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2894 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2895 
		/* Free the original amdgpu allocated pasid.
		 * Will be replaced with a kfd allocated pasid.
		 */
2899 		amdgpu_pasid_free(vm->pasid);
2900 		vm->pasid = 0;
2901 	}
2902 
2903 	/* Free the shadow bo for compute VM */
2904 	amdgpu_bo_unref(&vm->root.base.bo->shadow);
2905 
2906 	if (pasid)
2907 		vm->pasid = pasid;
2908 
2909 	goto unreserve_bo;
2910 
2911 free_idr:
2912 	if (pasid) {
2913 		unsigned long flags;
2914 
2915 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2916 		idr_remove(&adev->vm_manager.pasid_idr, pasid);
2917 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2918 	}
2919 unreserve_bo:
2920 	amdgpu_bo_unreserve(vm->root.base.bo);
2921 	return r;
2922 }
2923 
2924 /**
2925  * amdgpu_vm_release_compute - release a compute vm
2926  * @adev: amdgpu_device pointer
2927  * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
2928  *
 * This is the counterpart of amdgpu_vm_make_compute. It decouples the compute
 * pasid from the vm. Compute should stop using the vm after this call.
2931  */
2932 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2933 {
2934 	if (vm->pasid) {
2935 		unsigned long flags;
2936 
2937 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2938 		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2939 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2940 	}
2941 	vm->pasid = 0;
2942 }
2943 
2944 /**
2945  * amdgpu_vm_fini - tear down a vm instance
2946  *
2947  * @adev: amdgpu_device pointer
2948  * @vm: requested vm
2949  *
2950  * Tear down @vm.
2951  * Unbind the VM and remove all bos from the vm bo list
2952  */
2953 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2954 {
2955 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2956 	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2957 	struct amdgpu_bo *root;
2958 	int i;
2959 
2960 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2961 
2962 	root = amdgpu_bo_ref(vm->root.base.bo);
2963 	amdgpu_bo_reserve(root, true);
2964 	if (vm->pasid) {
2965 		unsigned long flags;
2966 
2967 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2968 		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2969 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2970 		vm->pasid = 0;
2971 	}
2972 
2973 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2974 		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2975 			amdgpu_vm_prt_fini(adev, vm);
2976 			prt_fini_needed = false;
2977 		}
2978 
2979 		list_del(&mapping->list);
2980 		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2981 	}
2982 
2983 	amdgpu_vm_free_pts(adev, vm, NULL);
2984 	amdgpu_bo_unreserve(root);
2985 	amdgpu_bo_unref(&root);
2986 	WARN_ON(vm->root.base.bo);
2987 
2988 	drm_sched_entity_destroy(&vm->direct);
2989 	drm_sched_entity_destroy(&vm->delayed);
2990 
2991 	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2992 		dev_err(adev->dev, "still active bo inside vm\n");
2993 	}
2994 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2995 					     &vm->va.rb_root, rb) {
2996 		/* Don't remove the mapping here, we don't want to trigger a
2997 		 * rebalance and the tree is about to be destroyed anyway.
2998 		 */
2999 		list_del(&mapping->list);
3000 		kfree(mapping);
3001 	}
3002 
3003 	dma_fence_put(vm->last_update);
3004 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
3005 		amdgpu_vmid_free_reserved(adev, vm, i);
3006 }
3007 
3008 /**
3009  * amdgpu_vm_manager_init - init the VM manager
3010  *
3011  * @adev: amdgpu_device pointer
3012  *
3013  * Initialize the VM manager structures
3014  */
3015 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
3016 {
3017 	unsigned i;
3018 
3019 	amdgpu_vmid_mgr_init(adev);
3020 
3021 	adev->vm_manager.fence_context =
3022 		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3023 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
3024 		adev->vm_manager.seqno[i] = 0;
3025 
3026 	spin_lock_init(&adev->vm_manager.prt_lock);
3027 	atomic_set(&adev->vm_manager.num_prt_users, 0);
3028 
	/* If not overridden by the user, compute VM page tables are updated
	 * by the CPU by default, but only on large BAR systems.
	 */
3032 #ifdef CONFIG_X86_64
3033 	if (amdgpu_vm_update_mode == -1) {
3034 		if (amdgpu_gmc_vram_full_visible(&adev->gmc))
3035 			adev->vm_manager.vm_update_mode =
3036 				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
3037 		else
3038 			adev->vm_manager.vm_update_mode = 0;
3039 	} else
3040 		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
3041 #else
3042 	adev->vm_manager.vm_update_mode = 0;
3043 #endif
3044 
3045 	idr_init(&adev->vm_manager.pasid_idr);
3046 	spin_lock_init(&adev->vm_manager.pasid_lock);
3047 
3048 	adev->vm_manager.xgmi_map_counter = 0;
3049 	mutex_init(&adev->vm_manager.lock_pstate);
3050 }
3051 
3052 /**
3053  * amdgpu_vm_manager_fini - cleanup VM manager
3054  *
3055  * @adev: amdgpu_device pointer
3056  *
3057  * Cleanup the VM manager and free resources.
3058  */
3059 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
3060 {
3061 	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
3062 	idr_destroy(&adev->vm_manager.pasid_idr);
3063 
3064 	amdgpu_vmid_mgr_fini(adev);
3065 }
3066 
3067 /**
3068  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
3069  *
3070  * @dev: drm device pointer
3071  * @data: drm_amdgpu_vm
3072  * @filp: drm file pointer
3073  *
3074  * Returns:
3075  * 0 for success, -errno for errors.
3076  */
3077 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
3078 {
3079 	union drm_amdgpu_vm *args = data;
3080 	struct amdgpu_device *adev = dev->dev_private;
3081 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
3082 	int r;
3083 
3084 	switch (args->in.op) {
3085 	case AMDGPU_VM_OP_RESERVE_VMID:
		/* We only need to reserve a VMID from the gfxhub */
3087 		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm,
3088 					       AMDGPU_GFXHUB_0);
3089 		if (r)
3090 			return r;
3091 		break;
3092 	case AMDGPU_VM_OP_UNRESERVE_VMID:
3093 		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
3094 		break;
3095 	default:
3096 		return -EINVAL;
3097 	}
3098 
3099 	return 0;
3100 }
3101 
3102 /**
3103  * amdgpu_vm_get_task_info - Extracts task info for a PASID.
3104  *
 * @adev: amdgpu device pointer
3106  * @pasid: PASID identifier for VM
3107  * @task_info: task_info to fill.
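 *
 * Illustrative sketch, e.g. from a GPU fault handler:
 *
 *	struct amdgpu_task_info task_info;
 *
 *	memset(&task_info, 0, sizeof(task_info));
 *	amdgpu_vm_get_task_info(adev, pasid, &task_info);
 *	if (task_info.pid)
 *		DRM_ERROR("fault in process %s pid %d\n",
 *			  task_info.process_name, task_info.pid);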
3108  */
3109 void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
3110 			 struct amdgpu_task_info *task_info)
3111 {
3112 	struct amdgpu_vm *vm;
3113 	unsigned long flags;
3114 
3115 	spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3116 
3117 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3118 	if (vm)
3119 		*task_info = vm->task_info;
3120 
3121 	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3122 }
3123 
3124 /**
3125  * amdgpu_vm_set_task_info - Sets VMs task info.
3126  *
3127  * @vm: vm for which to set the info
3128  */
3129 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
3130 {
3131 	if (vm->task_info.pid)
3132 		return;
3133 
3134 	vm->task_info.pid = current->pid;
3135 	get_task_comm(vm->task_info.task_name, current);
3136 
3137 	if (current->group_leader->mm != current->mm)
3138 		return;
3139 
3140 	vm->task_info.tgid = current->group_leader->pid;
3141 	get_task_comm(vm->task_info.process_name, current->group_leader);
3142 }
3143 
3144 /**
3145  * amdgpu_vm_handle_fault - graceful handling of VM faults.
3146  * @adev: amdgpu device pointer
3147  * @pasid: PASID of the VM
3148  * @addr: Address of the fault
3149  *
3150  * Try to gracefully handle a VM fault. Return true if the fault was handled and
3151  * shouldn't be reported any more.
3152  */
3153 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
3154 			    uint64_t addr)
3155 {
3156 	struct amdgpu_bo *root;
3157 	uint64_t value, flags;
3158 	struct amdgpu_vm *vm;
3159 	long r;
3160 
3161 	spin_lock(&adev->vm_manager.pasid_lock);
3162 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3163 	if (vm)
3164 		root = amdgpu_bo_ref(vm->root.base.bo);
3165 	else
3166 		root = NULL;
3167 	spin_unlock(&adev->vm_manager.pasid_lock);
3168 
3169 	if (!root)
3170 		return false;
3171 
3172 	r = amdgpu_bo_reserve(root, true);
3173 	if (r)
3174 		goto error_unref;
3175 
3176 	/* Double check that the VM still exists */
3177 	spin_lock(&adev->vm_manager.pasid_lock);
3178 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3179 	if (vm && vm->root.base.bo != root)
3180 		vm = NULL;
3181 	spin_unlock(&adev->vm_manager.pasid_lock);
3182 	if (!vm)
3183 		goto error_unlock;
3184 
3185 	addr /= AMDGPU_GPU_PAGE_SIZE;
3186 	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
3187 		AMDGPU_PTE_SYSTEM;
3188 
3189 	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
3190 		/* Redirect the access to the dummy page */
3191 		value = adev->dummy_page_addr;
3192 		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
3193 			AMDGPU_PTE_WRITEABLE;
3194 	} else {
3195 		/* Let the hw retry silently on the PTE */
3196 		value = 0;
3197 	}
3198 
3199 	r = amdgpu_vm_bo_update_mapping(adev, vm, true, NULL, addr, addr + 1,
3200 					flags, value, NULL, NULL);
3201 	if (r)
3202 		goto error_unlock;
3203 
3204 	r = amdgpu_vm_update_pdes(adev, vm, true);
3205 
3206 error_unlock:
3207 	amdgpu_bo_unreserve(root);
3208 	if (r < 0)
3209 		DRM_ERROR("Can't handle page fault (%ld)\n", r);
3210 
3211 error_unref:
3212 	amdgpu_bo_unref(&root);
3213 
3214 	return false;
3215 }
3216