1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/dma-fence-array.h>
29 #include <linux/interval_tree_generic.h>
30 #include <linux/idr.h>
31 #include <drm/drmP.h>
32 #include <drm/amdgpu_drm.h>
33 #include "amdgpu.h"
34 #include "amdgpu_trace.h"
35 #include "amdgpu_amdkfd.h"
36 #include "amdgpu_gmc.h"
37 
38 /**
39  * DOC: GPUVM
40  *
41  * GPUVM is similar to the legacy gart on older asics, however
42  * rather than there being a single global gart table
43  * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * VRAM pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
55  * Cayman/Trinity support up to 8 active VMs at any given time;
56  * SI supports 16.
57  */
58 
59 #define START(node) ((node)->start)
60 #define LAST(node) ((node)->last)
61 
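/*
 * Instantiate the interval tree helpers (amdgpu_vm_it_insert(),
 * amdgpu_vm_it_remove(), amdgpu_vm_it_iter_first() and
 * amdgpu_vm_it_iter_next()) for struct amdgpu_bo_va_mapping,
 * keyed by the inclusive [start, last] interval of the mapping.
 */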
62 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
63 		     START, LAST, static, amdgpu_vm_it)
64 
65 #undef START
66 #undef LAST
67 
68 /**
69  * struct amdgpu_pte_update_params - Local structure
70  *
71  * Encapsulate some VM table update parameters to reduce
72  * the number of function parameters
73  *
74  */
75 struct amdgpu_pte_update_params {
76 
77 	/**
78 	 * @adev: amdgpu device we do this update for
79 	 */
80 	struct amdgpu_device *adev;
81 
82 	/**
83 	 * @vm: optional amdgpu_vm we do this update for
84 	 */
85 	struct amdgpu_vm *vm;
86 
87 	/**
88 	 * @src: address where to copy page table entries from
89 	 */
90 	uint64_t src;
91 
92 	/**
93 	 * @ib: indirect buffer to fill with commands
94 	 */
95 	struct amdgpu_ib *ib;
96 
97 	/**
98 	 * @func: Function which actually does the update
99 	 */
100 	void (*func)(struct amdgpu_pte_update_params *params,
101 		     struct amdgpu_bo *bo, uint64_t pe,
102 		     uint64_t addr, unsigned count, uint32_t incr,
103 		     uint64_t flags);
104 	/**
105 	 * @pages_addr:
106 	 *
107 	 * DMA addresses to use for mapping, used during VM update by CPU
108 	 */
109 	dma_addr_t *pages_addr;
110 
111 	/**
112 	 * @kptr:
113 	 *
114 	 * Kernel pointer of PD/PT BO that needs to be updated,
115 	 * used during VM update by CPU
116 	 */
117 	void *kptr;
118 };
119 
120 /**
121  * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
122  */
123 struct amdgpu_prt_cb {
124 
125 	/**
126 	 * @adev: amdgpu device
127 	 */
128 	struct amdgpu_device *adev;
129 
130 	/**
131 	 * @cb: callback
132 	 */
133 	struct dma_fence_cb cb;
134 };
135 
136 /**
137  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
138  *
139  * @base: base structure for tracking BO usage in a VM
140  * @vm: vm to which bo is to be added
141  * @bo: amdgpu buffer object
142  *
143  * Initialize a bo_va_base structure and add it to the appropriate lists
144  *
145  */
146 static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
147 				   struct amdgpu_vm *vm,
148 				   struct amdgpu_bo *bo)
149 {
150 	base->vm = vm;
151 	base->bo = bo;
152 	INIT_LIST_HEAD(&base->bo_list);
153 	INIT_LIST_HEAD(&base->vm_status);
154 
155 	if (!bo)
156 		return;
157 	list_add_tail(&base->bo_list, &bo->va);
158 
159 	if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
160 		return;
161 
162 	if (bo->preferred_domains &
163 	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
164 		return;
165 
	/*
	 * We checked all the prerequisites, but it looks like this per VM BO
	 * is currently evicted. Add the BO to the evicted list to make sure it
	 * is validated on next VM use to avoid faults.
	 */
171 	list_move_tail(&base->vm_status, &vm->evicted);
172 }
173 
174 /**
175  * amdgpu_vm_level_shift - return the addr shift for each level
176  *
177  * @adev: amdgpu_device pointer
178  * @level: VMPT level
179  *
180  * Returns:
181  * The number of bits the pfn needs to be right shifted for a level.
182  */
183 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
184 				      unsigned level)
185 {
186 	unsigned shift = 0xff;
187 
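	/*
	 * The PTB is indexed directly by the low bits of the pfn, PDB0 starts
	 * block_size bits above it and every further PDB level resolves
	 * another 9 bits; unsupported levels return the 0xff poison value.
	 */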
188 	switch (level) {
189 	case AMDGPU_VM_PDB2:
190 	case AMDGPU_VM_PDB1:
191 	case AMDGPU_VM_PDB0:
192 		shift = 9 * (AMDGPU_VM_PDB0 - level) +
193 			adev->vm_manager.block_size;
194 		break;
195 	case AMDGPU_VM_PTB:
196 		shift = 0;
197 		break;
198 	default:
		dev_err(adev->dev, "the level %d isn't supported.\n", level);
200 	}
201 
202 	return shift;
203 }
204 
205 /**
206  * amdgpu_vm_num_entries - return the number of entries in a PD/PT
207  *
208  * @adev: amdgpu_device pointer
209  * @level: VMPT level
210  *
211  * Returns:
212  * The number of entries in a page directory or page table.
213  */
214 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
215 				      unsigned level)
216 {
217 	unsigned shift = amdgpu_vm_level_shift(adev,
218 					       adev->vm_manager.root_level);
219 
220 	if (level == adev->vm_manager.root_level)
221 		/* For the root directory */
222 		return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
223 	else if (level != AMDGPU_VM_PTB)
224 		/* Everything in between */
225 		return 512;
226 	else
227 		/* For the page tables on the leaves */
228 		return AMDGPU_VM_PTE_COUNT(adev);
229 }
230 
231 /**
232  * amdgpu_vm_bo_size - returns the size of the BOs in bytes
233  *
234  * @adev: amdgpu_device pointer
235  * @level: VMPT level
236  *
237  * Returns:
238  * The size of the BO for a page directory or page table in bytes.
239  */
240 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
241 {
242 	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
243 }
244 
245 /**
246  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
247  *
248  * @vm: vm providing the BOs
249  * @validated: head of validation list
250  * @entry: entry to add
251  *
252  * Add the page directory to the list of BOs to
253  * validate for command submission.
254  */
255 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
256 			 struct list_head *validated,
257 			 struct amdgpu_bo_list_entry *entry)
258 {
259 	entry->robj = vm->root.base.bo;
260 	entry->priority = 0;
261 	entry->tv.bo = &entry->robj->tbo;
262 	entry->tv.shared = true;
263 	entry->user_pages = NULL;
264 	list_add(&entry->tv.head, validated);
265 }
266 
267 /**
268  * amdgpu_vm_validate_pt_bos - validate the page table BOs
269  *
270  * @adev: amdgpu device pointer
271  * @vm: vm providing the BOs
272  * @validate: callback to do the validation
273  * @param: parameter for the validation callback
274  *
 * Validate the page table BOs on command submission if necessary.
276  *
277  * Returns:
278  * Validation result.
279  */
280 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
281 			      int (*validate)(void *p, struct amdgpu_bo *bo),
282 			      void *param)
283 {
284 	struct ttm_bo_global *glob = adev->mman.bdev.glob;
285 	struct amdgpu_vm_bo_base *bo_base, *tmp;
286 	int r = 0;
287 
288 	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
289 		struct amdgpu_bo *bo = bo_base->bo;
290 
291 		if (bo->parent) {
292 			r = validate(param, bo);
293 			if (r)
294 				break;
295 
296 			spin_lock(&glob->lru_lock);
297 			ttm_bo_move_to_lru_tail(&bo->tbo);
298 			if (bo->shadow)
299 				ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
300 			spin_unlock(&glob->lru_lock);
301 		}
302 
303 		if (bo->tbo.type != ttm_bo_type_kernel) {
304 			spin_lock(&vm->moved_lock);
305 			list_move(&bo_base->vm_status, &vm->moved);
306 			spin_unlock(&vm->moved_lock);
307 		} else {
308 			list_move(&bo_base->vm_status, &vm->relocated);
309 		}
310 	}
311 
312 	spin_lock(&glob->lru_lock);
313 	list_for_each_entry(bo_base, &vm->idle, vm_status) {
314 		struct amdgpu_bo *bo = bo_base->bo;
315 
316 		if (!bo->parent)
317 			continue;
318 
319 		ttm_bo_move_to_lru_tail(&bo->tbo);
320 		if (bo->shadow)
321 			ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
322 	}
323 	spin_unlock(&glob->lru_lock);
324 
325 	return r;
326 }
327 
328 /**
329  * amdgpu_vm_ready - check VM is ready for updates
330  *
331  * @vm: VM to check
332  *
333  * Check if all VM PDs/PTs are ready for updates
334  *
335  * Returns:
336  * True if eviction list is empty.
337  */
338 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
339 {
340 	return list_empty(&vm->evicted);
341 }
342 
343 /**
344  * amdgpu_vm_clear_bo - initially clear the PDs/PTs
345  *
346  * @adev: amdgpu_device pointer
347  * @vm: VM to clear BO from
348  * @bo: BO to clear
349  * @level: level this BO is at
350  * @pte_support_ats: indicate ATS support from PTE
351  *
352  * Root PD needs to be reserved when calling this.
353  *
354  * Returns:
355  * 0 on success, errno otherwise.
356  */
357 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
358 			      struct amdgpu_vm *vm, struct amdgpu_bo *bo,
359 			      unsigned level, bool pte_support_ats)
360 {
361 	struct ttm_operation_ctx ctx = { true, false };
362 	struct dma_fence *fence = NULL;
363 	unsigned entries, ats_entries;
364 	struct amdgpu_ring *ring;
365 	struct amdgpu_job *job;
366 	uint64_t addr;
367 	int r;
368 
369 	addr = amdgpu_bo_gpu_offset(bo);
370 	entries = amdgpu_bo_size(bo) / 8;
371 
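	/*
	 * With ATS the entries covering the range below the VA hole are
	 * initialized to the default ATC value instead of zero. For the root
	 * PD that is only the entries below AMDGPU_VA_HOLE_START, lower level
	 * BOs are either cleared entirely as ATS or not at all.
	 */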
372 	if (pte_support_ats) {
373 		if (level == adev->vm_manager.root_level) {
374 			ats_entries = amdgpu_vm_level_shift(adev, level);
375 			ats_entries += AMDGPU_GPU_PAGE_SHIFT;
376 			ats_entries = AMDGPU_VA_HOLE_START >> ats_entries;
377 			ats_entries = min(ats_entries, entries);
378 			entries -= ats_entries;
379 		} else {
380 			ats_entries = entries;
381 			entries = 0;
382 		}
383 	} else {
384 		ats_entries = 0;
385 	}
386 
387 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
388 
389 	r = reservation_object_reserve_shared(bo->tbo.resv);
390 	if (r)
391 		return r;
392 
393 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
394 	if (r)
395 		goto error;
396 
397 	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
398 	if (r)
399 		goto error;
400 
401 	if (ats_entries) {
402 		uint64_t ats_value;
403 
404 		ats_value = AMDGPU_PTE_DEFAULT_ATC;
405 		if (level != AMDGPU_VM_PTB)
406 			ats_value |= AMDGPU_PDE_PTE;
407 
408 		amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
409 				      ats_entries, 0, ats_value);
410 		addr += ats_entries * 8;
411 	}
412 
413 	if (entries)
414 		amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
415 				      entries, 0, 0);
416 
417 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
418 
419 	WARN_ON(job->ibs[0].length_dw > 64);
420 	r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
421 			     AMDGPU_FENCE_OWNER_UNDEFINED, false);
422 	if (r)
423 		goto error_free;
424 
425 	r = amdgpu_job_submit(job, ring, &vm->entity,
426 			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
427 	if (r)
428 		goto error_free;
429 
430 	amdgpu_bo_fence(bo, fence, true);
431 	dma_fence_put(fence);
432 
433 	if (bo->shadow)
434 		return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
435 					  level, pte_support_ats);
436 
437 	return 0;
438 
439 error_free:
440 	amdgpu_job_free(job);
441 
442 error:
443 	return r;
444 }
445 
446 /**
447  * amdgpu_vm_alloc_levels - allocate the PD/PT levels
448  *
449  * @adev: amdgpu_device pointer
450  * @vm: requested vm
451  * @parent: parent PT
452  * @saddr: start of the address range
453  * @eaddr: end of the address range
454  * @level: VMPT level
455  * @ats: indicate ATS support from PTE
456  *
457  * Make sure the page directories and page tables are allocated
458  *
459  * Returns:
460  * 0 on success, errno otherwise.
461  */
462 static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
463 				  struct amdgpu_vm *vm,
464 				  struct amdgpu_vm_pt *parent,
465 				  uint64_t saddr, uint64_t eaddr,
466 				  unsigned level, bool ats)
467 {
468 	unsigned shift = amdgpu_vm_level_shift(adev, level);
469 	unsigned pt_idx, from, to;
470 	u64 flags;
471 	int r;
472 
473 	if (!parent->entries) {
474 		unsigned num_entries = amdgpu_vm_num_entries(adev, level);
475 
476 		parent->entries = kvmalloc_array(num_entries,
477 						   sizeof(struct amdgpu_vm_pt),
478 						   GFP_KERNEL | __GFP_ZERO);
479 		if (!parent->entries)
480 			return -ENOMEM;
482 	}
483 
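	/*
	 * from/to are the indices into this level's directory; the masked
	 * saddr/eaddr below are the remaining offsets handed down to the
	 * next level.
	 */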
484 	from = saddr >> shift;
485 	to = eaddr >> shift;
486 	if (from >= amdgpu_vm_num_entries(adev, level) ||
487 	    to >= amdgpu_vm_num_entries(adev, level))
488 		return -EINVAL;
489 
490 	++level;
491 	saddr = saddr & ((1 << shift) - 1);
492 	eaddr = eaddr & ((1 << shift) - 1);
493 
494 	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
495 	if (vm->use_cpu_for_update)
496 		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
497 	else
498 		flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
499 				AMDGPU_GEM_CREATE_SHADOW);
500 
501 	/* walk over the address space and allocate the page tables */
502 	for (pt_idx = from; pt_idx <= to; ++pt_idx) {
503 		struct reservation_object *resv = vm->root.base.bo->tbo.resv;
504 		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
505 		struct amdgpu_bo *pt;
506 
507 		if (!entry->base.bo) {
508 			struct amdgpu_bo_param bp;
509 
510 			memset(&bp, 0, sizeof(bp));
511 			bp.size = amdgpu_vm_bo_size(adev, level);
512 			bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
513 			bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
514 			bp.flags = flags;
515 			bp.type = ttm_bo_type_kernel;
516 			bp.resv = resv;
517 			r = amdgpu_bo_create(adev, &bp, &pt);
518 			if (r)
519 				return r;
520 
521 			r = amdgpu_vm_clear_bo(adev, vm, pt, level, ats);
522 			if (r) {
523 				amdgpu_bo_unref(&pt->shadow);
524 				amdgpu_bo_unref(&pt);
525 				return r;
526 			}
527 
528 			if (vm->use_cpu_for_update) {
529 				r = amdgpu_bo_kmap(pt, NULL);
530 				if (r) {
531 					amdgpu_bo_unref(&pt->shadow);
532 					amdgpu_bo_unref(&pt);
533 					return r;
534 				}
535 			}
536 
			/* Keep a reference to the root directory to avoid
			 * freeing them up in the wrong order.
			 */
540 			pt->parent = amdgpu_bo_ref(parent->base.bo);
541 
542 			amdgpu_vm_bo_base_init(&entry->base, vm, pt);
543 			list_move(&entry->base.vm_status, &vm->relocated);
544 		}
545 
546 		if (level < AMDGPU_VM_PTB) {
547 			uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
548 			uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
549 				((1 << shift) - 1);
550 			r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
551 						   sub_eaddr, level, ats);
552 			if (r)
553 				return r;
554 		}
555 	}
556 
557 	return 0;
558 }
559 
560 /**
561  * amdgpu_vm_alloc_pts - Allocate page tables.
562  *
563  * @adev: amdgpu_device pointer
564  * @vm: VM to allocate page tables for
565  * @saddr: Start address which needs to be allocated
566  * @size: Size from start address we need.
567  *
568  * Make sure the page tables are allocated.
569  *
570  * Returns:
571  * 0 on success, errno otherwise.
572  */
573 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
574 			struct amdgpu_vm *vm,
575 			uint64_t saddr, uint64_t size)
576 {
577 	uint64_t eaddr;
578 	bool ats = false;
579 
580 	/* validate the parameters */
581 	if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
582 		return -EINVAL;
583 
584 	eaddr = saddr + size - 1;
585 
586 	if (vm->pte_support_ats)
587 		ats = saddr < AMDGPU_VA_HOLE_START;
588 
589 	saddr /= AMDGPU_GPU_PAGE_SIZE;
590 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
591 
592 	if (eaddr >= adev->vm_manager.max_pfn) {
593 		dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
594 			eaddr, adev->vm_manager.max_pfn);
595 		return -EINVAL;
596 	}
597 
598 	return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
599 				      adev->vm_manager.root_level, ats);
600 }
601 
602 /**
603  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
604  *
605  * @adev: amdgpu_device pointer
606  */
607 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
608 {
609 	const struct amdgpu_ip_block *ip_block;
610 	bool has_compute_vm_bug;
611 	struct amdgpu_ring *ring;
612 	int i;
613 
614 	has_compute_vm_bug = false;
615 
616 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
617 	if (ip_block) {
		/* Compute has a VM bug for GFX version < 7.
		 * Compute has a VM bug for GFX 8 MEC firmware version < 673.
		 */
620 		if (ip_block->version->major <= 7)
621 			has_compute_vm_bug = true;
622 		else if (ip_block->version->major == 8)
623 			if (adev->gfx.mec_fw_version < 673)
624 				has_compute_vm_bug = true;
625 	}
626 
627 	for (i = 0; i < adev->num_rings; i++) {
628 		ring = adev->rings[i];
629 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
630 			/* only compute rings */
631 			ring->has_compute_vm_bug = has_compute_vm_bug;
632 		else
633 			ring->has_compute_vm_bug = false;
634 	}
635 }
636 
637 /**
638  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
639  *
640  * @ring: ring on which the job will be submitted
641  * @job: job to submit
642  *
643  * Returns:
644  * True if sync is needed.
645  */
646 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
647 				  struct amdgpu_job *job)
648 {
649 	struct amdgpu_device *adev = ring->adev;
650 	unsigned vmhub = ring->funcs->vmhub;
651 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
652 	struct amdgpu_vmid *id;
653 	bool gds_switch_needed;
654 	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
655 
656 	if (job->vmid == 0)
657 		return false;
658 	id = &id_mgr->ids[job->vmid];
659 	gds_switch_needed = ring->funcs->emit_gds_switch && (
660 		id->gds_base != job->gds_base ||
661 		id->gds_size != job->gds_size ||
662 		id->gws_base != job->gws_base ||
663 		id->gws_size != job->gws_size ||
664 		id->oa_base != job->oa_base ||
665 		id->oa_size != job->oa_size);
666 
667 	if (amdgpu_vmid_had_gpu_reset(adev, id))
668 		return true;
669 
670 	return vm_flush_needed || gds_switch_needed;
671 }
672 
673 /**
674  * amdgpu_vm_flush - hardware flush the vm
675  *
676  * @ring: ring to use for flush
677  * @job:  related job
678  * @need_pipe_sync: is pipe sync needed
679  *
680  * Emit a VM flush when it is necessary.
681  *
682  * Returns:
683  * 0 on success, errno otherwise.
684  */
685 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
686 {
687 	struct amdgpu_device *adev = ring->adev;
688 	unsigned vmhub = ring->funcs->vmhub;
689 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
690 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
691 	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
692 		id->gds_base != job->gds_base ||
693 		id->gds_size != job->gds_size ||
694 		id->gws_base != job->gws_base ||
695 		id->gws_size != job->gws_size ||
696 		id->oa_base != job->oa_base ||
697 		id->oa_size != job->oa_size);
698 	bool vm_flush_needed = job->vm_needs_flush;
699 	bool pasid_mapping_needed = id->pasid != job->pasid ||
700 		!id->pasid_mapping ||
701 		!dma_fence_is_signaled(id->pasid_mapping);
702 	struct dma_fence *fence = NULL;
703 	unsigned patch_offset = 0;
704 	int r;
705 
706 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
707 		gds_switch_needed = true;
708 		vm_flush_needed = true;
709 		pasid_mapping_needed = true;
710 	}
711 
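	/* Only emit what the ring and GMC actually support */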
712 	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
713 	vm_flush_needed &= !!ring->funcs->emit_vm_flush;
714 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
715 		ring->funcs->emit_wreg;
716 
717 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
718 		return 0;
719 
720 	if (ring->funcs->init_cond_exec)
721 		patch_offset = amdgpu_ring_init_cond_exec(ring);
722 
723 	if (need_pipe_sync)
724 		amdgpu_ring_emit_pipeline_sync(ring);
725 
726 	if (vm_flush_needed) {
727 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
728 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
729 	}
730 
731 	if (pasid_mapping_needed)
732 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
733 
734 	if (vm_flush_needed || pasid_mapping_needed) {
735 		r = amdgpu_fence_emit(ring, &fence, 0);
736 		if (r)
737 			return r;
738 	}
739 
740 	if (vm_flush_needed) {
741 		mutex_lock(&id_mgr->lock);
742 		dma_fence_put(id->last_flush);
743 		id->last_flush = dma_fence_get(fence);
744 		id->current_gpu_reset_count =
745 			atomic_read(&adev->gpu_reset_counter);
746 		mutex_unlock(&id_mgr->lock);
747 	}
748 
749 	if (pasid_mapping_needed) {
750 		id->pasid = job->pasid;
751 		dma_fence_put(id->pasid_mapping);
752 		id->pasid_mapping = dma_fence_get(fence);
753 	}
754 	dma_fence_put(fence);
755 
756 	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
757 		id->gds_base = job->gds_base;
758 		id->gds_size = job->gds_size;
759 		id->gws_base = job->gws_base;
760 		id->gws_size = job->gws_size;
761 		id->oa_base = job->oa_base;
762 		id->oa_size = job->oa_size;
763 		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
764 					    job->gds_size, job->gws_base,
765 					    job->gws_size, job->oa_base,
766 					    job->oa_size);
767 	}
768 
769 	if (ring->funcs->patch_cond_exec)
770 		amdgpu_ring_patch_cond_exec(ring, patch_offset);
771 
772 	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
773 	if (ring->funcs->emit_switch_buffer) {
774 		amdgpu_ring_emit_switch_buffer(ring);
775 		amdgpu_ring_emit_switch_buffer(ring);
776 	}
777 	return 0;
778 }
779 
780 /**
781  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
782  *
783  * @vm: requested vm
784  * @bo: requested buffer object
785  *
786  * Find @bo inside the requested vm.
787  * Search inside the @bos vm list for the requested vm
788  * Returns the found bo_va or NULL if none is found
789  *
790  * Object has to be reserved!
791  *
792  * Returns:
793  * Found bo_va or NULL.
794  */
795 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
796 				       struct amdgpu_bo *bo)
797 {
798 	struct amdgpu_bo_va *bo_va;
799 
800 	list_for_each_entry(bo_va, &bo->va, base.bo_list) {
801 		if (bo_va->base.vm == vm) {
802 			return bo_va;
803 		}
804 	}
805 	return NULL;
806 }
807 
808 /**
809  * amdgpu_vm_do_set_ptes - helper to call the right asic function
810  *
811  * @params: see amdgpu_pte_update_params definition
812  * @bo: PD/PT to update
813  * @pe: addr of the page entry
814  * @addr: dst addr to write into pe
815  * @count: number of page entries to update
816  * @incr: increase next addr by incr bytes
817  * @flags: hw access flags
818  *
819  * Traces the parameters and calls the right asic functions
820  * to setup the page table using the DMA.
821  */
822 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
823 				  struct amdgpu_bo *bo,
824 				  uint64_t pe, uint64_t addr,
825 				  unsigned count, uint32_t incr,
826 				  uint64_t flags)
827 {
828 	pe += amdgpu_bo_gpu_offset(bo);
829 	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
830 
831 	if (count < 3) {
832 		amdgpu_vm_write_pte(params->adev, params->ib, pe,
833 				    addr | flags, count, incr);
834 
835 	} else {
836 		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
837 				      count, incr, flags);
838 	}
839 }
840 
841 /**
842  * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
843  *
844  * @params: see amdgpu_pte_update_params definition
845  * @bo: PD/PT to update
846  * @pe: addr of the page entry
847  * @addr: dst addr to write into pe
848  * @count: number of page entries to update
849  * @incr: increase next addr by incr bytes
850  * @flags: hw access flags
851  *
852  * Traces the parameters and calls the DMA function to copy the PTEs.
853  */
854 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
855 				   struct amdgpu_bo *bo,
856 				   uint64_t pe, uint64_t addr,
857 				   unsigned count, uint32_t incr,
858 				   uint64_t flags)
859 {
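	/* params->src is the GPU address of a PTE array inside the IB, one
	 * 8 byte entry per GPU page, so index it with the page number of addr.
	 */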
860 	uint64_t src = (params->src + (addr >> 12) * 8);
861 
862 	pe += amdgpu_bo_gpu_offset(bo);
863 	trace_amdgpu_vm_copy_ptes(pe, src, count);
864 
865 	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
866 }
867 
868 /**
869  * amdgpu_vm_map_gart - Resolve gart mapping of addr
870  *
871  * @pages_addr: optional DMA address to use for lookup
872  * @addr: the unmapped addr
873  *
874  * Look up the physical address of the page that the pte resolves
875  * to.
876  *
877  * Returns:
878  * The pointer for the page table entry.
879  */
880 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
881 {
882 	uint64_t result;
883 
884 	/* page table offset */
885 	result = pages_addr[addr >> PAGE_SHIFT];
886 
	/* in case cpu page size != gpu page size */
888 	result |= addr & (~PAGE_MASK);
889 
890 	result &= 0xFFFFFFFFFFFFF000ULL;
891 
892 	return result;
893 }
894 
895 /**
896  * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
897  *
898  * @params: see amdgpu_pte_update_params definition
899  * @bo: PD/PT to update
900  * @pe: kmap addr of the page entry
901  * @addr: dst addr to write into pe
902  * @count: number of page entries to update
903  * @incr: increase next addr by incr bytes
904  * @flags: hw access flags
905  *
906  * Write count number of PT/PD entries directly.
907  */
908 static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
909 				   struct amdgpu_bo *bo,
910 				   uint64_t pe, uint64_t addr,
911 				   unsigned count, uint32_t incr,
912 				   uint64_t flags)
913 {
914 	unsigned int i;
915 	uint64_t value;
916 
917 	pe += (unsigned long)amdgpu_bo_kptr(bo);
918 
919 	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
920 
921 	for (i = 0; i < count; i++) {
922 		value = params->pages_addr ?
923 			amdgpu_vm_map_gart(params->pages_addr, addr) :
924 			addr;
925 		amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
926 				       i, value, flags);
927 		addr += incr;
928 	}
929 }
930 
931 
932 /**
933  * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
934  *
935  * @adev: amdgpu_device pointer
936  * @vm: related vm
937  * @owner: fence owner
938  *
939  * Returns:
940  * 0 on success, errno otherwise.
941  */
942 static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
943 			     void *owner)
944 {
945 	struct amdgpu_sync sync;
946 	int r;
947 
948 	amdgpu_sync_create(&sync);
949 	amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
950 	r = amdgpu_sync_wait(&sync, true);
951 	amdgpu_sync_free(&sync);
952 
953 	return r;
954 }
955 
/**
 * amdgpu_vm_update_pde - update a single level in the hierarchy
 *
 * @params: parameters for the update
960  * @vm: requested vm
961  * @parent: parent directory
962  * @entry: entry to update
963  *
964  * Makes sure the requested entry in parent is up to date.
965  */
966 static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
967 				 struct amdgpu_vm *vm,
968 				 struct amdgpu_vm_pt *parent,
969 				 struct amdgpu_vm_pt *entry)
970 {
971 	struct amdgpu_bo *bo = parent->base.bo, *pbo;
972 	uint64_t pde, pt, flags;
973 	unsigned level;
974 
975 	/* Don't update huge pages here */
976 	if (entry->huge)
977 		return;
978 
979 	for (level = 0, pbo = bo->parent; pbo; ++level)
980 		pbo = pbo->parent;
981 
982 	level += params->adev->vm_manager.root_level;
983 	pt = amdgpu_bo_gpu_offset(entry->base.bo);
984 	flags = AMDGPU_PTE_VALID;
985 	amdgpu_gmc_get_vm_pde(params->adev, level, &pt, &flags);
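	/* byte offset of the PDE inside the parent directory, 8 bytes each */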
986 	pde = (entry - parent->entries) * 8;
987 	if (bo->shadow)
988 		params->func(params, bo->shadow, pde, pt, 1, 0, flags);
989 	params->func(params, bo, pde, pt, 1, 0, flags);
990 }
991 
/**
993  * amdgpu_vm_invalidate_level - mark all PD levels as invalid
994  *
995  * @adev: amdgpu_device pointer
996  * @vm: related vm
997  * @parent: parent PD
998  * @level: VMPT level
999  *
 * Mark all PD levels as invalid after an error.
1001  */
1002 static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
1003 				       struct amdgpu_vm *vm,
1004 				       struct amdgpu_vm_pt *parent,
1005 				       unsigned level)
1006 {
1007 	unsigned pt_idx, num_entries;
1008 
1009 	/*
1010 	 * Recurse into the subdirectories. This recursion is harmless because
1011 	 * we only have a maximum of 5 layers.
1012 	 */
1013 	num_entries = amdgpu_vm_num_entries(adev, level);
1014 	for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
1015 		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
1016 
1017 		if (!entry->base.bo)
1018 			continue;
1019 
1020 		if (!entry->base.moved)
1021 			list_move(&entry->base.vm_status, &vm->relocated);
1022 		amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
1023 	}
1024 }
1025 
/**
1027  * amdgpu_vm_update_directories - make sure that all directories are valid
1028  *
1029  * @adev: amdgpu_device pointer
1030  * @vm: requested vm
1031  *
1032  * Makes sure all directories are up to date.
1033  *
1034  * Returns:
1035  * 0 for success, error for failure.
1036  */
1037 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
1038 				 struct amdgpu_vm *vm)
1039 {
1040 	struct amdgpu_pte_update_params params;
1041 	struct amdgpu_job *job;
1042 	unsigned ndw = 0;
1043 	int r = 0;
1044 
1045 	if (list_empty(&vm->relocated))
1046 		return 0;
1047 
1048 restart:
1049 	memset(&params, 0, sizeof(params));
1050 	params.adev = adev;
1051 
1052 	if (vm->use_cpu_for_update) {
1053 		struct amdgpu_vm_bo_base *bo_base;
1054 
1055 		list_for_each_entry(bo_base, &vm->relocated, vm_status) {
1056 			r = amdgpu_bo_kmap(bo_base->bo, NULL);
1057 			if (unlikely(r))
1058 				return r;
1059 		}
1060 
1061 		r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
1062 		if (unlikely(r))
1063 			return r;
1064 
1065 		params.func = amdgpu_vm_cpu_set_ptes;
1066 	} else {
1067 		ndw = 512 * 8;
1068 		r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1069 		if (r)
1070 			return r;
1071 
1072 		params.ib = &job->ibs[0];
1073 		params.func = amdgpu_vm_do_set_ptes;
1074 	}
1075 
1076 	while (!list_empty(&vm->relocated)) {
1077 		struct amdgpu_vm_bo_base *bo_base, *parent;
1078 		struct amdgpu_vm_pt *pt, *entry;
1079 		struct amdgpu_bo *bo;
1080 
1081 		bo_base = list_first_entry(&vm->relocated,
1082 					   struct amdgpu_vm_bo_base,
1083 					   vm_status);
1084 		bo_base->moved = false;
1085 		list_move(&bo_base->vm_status, &vm->idle);
1086 
1087 		bo = bo_base->bo->parent;
1088 		if (!bo)
1089 			continue;
1090 
1091 		parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
1092 					  bo_list);
1093 		pt = container_of(parent, struct amdgpu_vm_pt, base);
1094 		entry = container_of(bo_base, struct amdgpu_vm_pt, base);
1095 
1096 		amdgpu_vm_update_pde(&params, vm, pt, entry);
1097 
1098 		if (!vm->use_cpu_for_update &&
1099 		    (ndw - params.ib->length_dw) < 32)
1100 			break;
1101 	}
1102 
1103 	if (vm->use_cpu_for_update) {
1104 		/* Flush HDP */
1105 		mb();
1106 		amdgpu_asic_flush_hdp(adev, NULL);
1107 	} else if (params.ib->length_dw == 0) {
1108 		amdgpu_job_free(job);
1109 	} else {
1110 		struct amdgpu_bo *root = vm->root.base.bo;
1111 		struct amdgpu_ring *ring;
1112 		struct dma_fence *fence;
1113 
1114 		ring = container_of(vm->entity.sched, struct amdgpu_ring,
1115 				    sched);
1116 
1117 		amdgpu_ring_pad_ib(ring, params.ib);
1118 		amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
1119 				 AMDGPU_FENCE_OWNER_VM, false);
1120 		WARN_ON(params.ib->length_dw > ndw);
1121 		r = amdgpu_job_submit(job, ring, &vm->entity,
1122 				      AMDGPU_FENCE_OWNER_VM, &fence);
1123 		if (r)
1124 			goto error;
1125 
1126 		amdgpu_bo_fence(root, fence, true);
1127 		dma_fence_put(vm->last_update);
1128 		vm->last_update = fence;
1129 	}
1130 
1131 	if (!list_empty(&vm->relocated))
1132 		goto restart;
1133 
1134 	return 0;
1135 
1136 error:
1137 	amdgpu_vm_invalidate_level(adev, vm, &vm->root,
1138 				   adev->vm_manager.root_level);
1139 	amdgpu_job_free(job);
1140 	return r;
1141 }
1142 
1143 /**
 * amdgpu_vm_get_entry - find the entry for an address
1145  *
1146  * @p: see amdgpu_pte_update_params definition
1147  * @addr: virtual address in question
1148  * @entry: resulting entry or NULL
1149  * @parent: parent entry
1150  *
 * Find the vm_pt entry and its parent for the given address.
1152  */
1153 void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
1154 			 struct amdgpu_vm_pt **entry,
1155 			 struct amdgpu_vm_pt **parent)
1156 {
1157 	unsigned level = p->adev->vm_manager.root_level;
1158 
1159 	*parent = NULL;
1160 	*entry = &p->vm->root;
1161 	while ((*entry)->entries) {
1162 		unsigned shift = amdgpu_vm_level_shift(p->adev, level++);
1163 
1164 		*parent = *entry;
1165 		*entry = &(*entry)->entries[addr >> shift];
1166 		addr &= (1ULL << shift) - 1;
1167 	}
1168 
1169 	if (level != AMDGPU_VM_PTB)
1170 		*entry = NULL;
1171 }
1172 
1173 /**
1174  * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
1175  *
1176  * @p: see amdgpu_pte_update_params definition
1177  * @entry: vm_pt entry to check
1178  * @parent: parent entry
1179  * @nptes: number of PTEs updated with this operation
1180  * @dst: destination address where the PTEs should point to
 * @flags: access flags for the PTEs
1182  *
1183  * Check if we can update the PD with a huge page.
1184  */
1185 static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
1186 					struct amdgpu_vm_pt *entry,
1187 					struct amdgpu_vm_pt *parent,
1188 					unsigned nptes, uint64_t dst,
1189 					uint64_t flags)
1190 {
1191 	uint64_t pde;
1192 
	/* In the case of a mixed PT the PDE must point to it */
1194 	if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
1195 	    nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
1196 		/* Set the huge page flag to stop scanning at this PDE */
1197 		flags |= AMDGPU_PDE_PTE;
1198 	}
1199 
1200 	if (!(flags & AMDGPU_PDE_PTE)) {
1201 		if (entry->huge) {
1202 			/* Add the entry to the relocated list to update it. */
1203 			entry->huge = false;
1204 			list_move(&entry->base.vm_status, &p->vm->relocated);
1205 		}
1206 		return;
1207 	}
1208 
1209 	entry->huge = true;
1210 	amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);
1211 
1212 	pde = (entry - parent->entries) * 8;
1213 	if (parent->base.bo->shadow)
1214 		p->func(p, parent->base.bo->shadow, pde, dst, 1, 0, flags);
1215 	p->func(p, parent->base.bo, pde, dst, 1, 0, flags);
1216 }
1217 
1218 /**
1219  * amdgpu_vm_update_ptes - make sure that page tables are valid
1220  *
1221  * @params: see amdgpu_pte_update_params definition
1222  * @start: start of GPU address range
1223  * @end: end of GPU address range
1224  * @dst: destination address to map to, the next dst inside the function
1225  * @flags: mapping flags
1226  *
1227  * Update the page tables in the range @start - @end.
1228  *
1229  * Returns:
 * 0 for success, -ENOENT if a required page table is missing.
1231  */
1232 static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1233 				  uint64_t start, uint64_t end,
1234 				  uint64_t dst, uint64_t flags)
1235 {
1236 	struct amdgpu_device *adev = params->adev;
1237 	const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
1238 
1239 	uint64_t addr, pe_start;
1240 	struct amdgpu_bo *pt;
1241 	unsigned nptes;
1242 
1243 	/* walk over the address space and update the page tables */
1244 	for (addr = start; addr < end; addr += nptes,
1245 	     dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
1246 		struct amdgpu_vm_pt *entry, *parent;
1247 
1248 		amdgpu_vm_get_entry(params, addr, &entry, &parent);
1249 		if (!entry)
1250 			return -ENOENT;
1251 
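		/*
		 * Update at most the remaining entries of the current page
		 * table, or just up to end if it falls into the same PT.
		 */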
1252 		if ((addr & ~mask) == (end & ~mask))
1253 			nptes = end - addr;
1254 		else
1255 			nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
1256 
1257 		amdgpu_vm_handle_huge_pages(params, entry, parent,
1258 					    nptes, dst, flags);
1259 		/* We don't need to update PTEs for huge pages */
1260 		if (entry->huge)
1261 			continue;
1262 
1263 		pt = entry->base.bo;
1264 		pe_start = (addr & mask) * 8;
1265 		if (pt->shadow)
1266 			params->func(params, pt->shadow, pe_start, dst, nptes,
1267 				     AMDGPU_GPU_PAGE_SIZE, flags);
1268 		params->func(params, pt, pe_start, dst, nptes,
1269 			     AMDGPU_GPU_PAGE_SIZE, flags);
1270 	}
1271 
1272 	return 0;
1273 }
1274 
/**
1276  * amdgpu_vm_frag_ptes - add fragment information to PTEs
1277  *
1278  * @params: see amdgpu_pte_update_params definition
1280  * @start: first PTE to handle
1281  * @end: last PTE to handle
1282  * @dst: addr those PTEs should point to
1283  * @flags: hw mapping flags
1284  *
1285  * Returns:
1286  * 0 for success, -EINVAL for failure.
1287  */
1288 static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params	*params,
1289 				uint64_t start, uint64_t end,
1290 				uint64_t dst, uint64_t flags)
1291 {
1292 	/**
1293 	 * The MC L1 TLB supports variable sized pages, based on a fragment
1294 	 * field in the PTE. When this field is set to a non-zero value, page
1295 	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1296 	 * flags are considered valid for all PTEs within the fragment range
1297 	 * and corresponding mappings are assumed to be physically contiguous.
1298 	 *
1299 	 * The L1 TLB can store a single PTE for the whole fragment,
1300 	 * significantly increasing the space available for translation
1301 	 * caching. This leads to large improvements in throughput when the
1302 	 * TLB is under pressure.
1303 	 *
1304 	 * The L2 TLB distributes small and large fragments into two
1305 	 * asymmetric partitions. The large fragment cache is significantly
1306 	 * larger. Thus, we try to use large fragments wherever possible.
1307 	 * Userspace can support this by aligning virtual base address and
1308 	 * allocation size to the fragment size.
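	 *
	 * E.g. with 4KB base pages a fragment value of 9 describes a 2MB
	 * (1 << (12 + 9) bytes) aligned and physically contiguous range.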
1309 	 */
1310 	unsigned max_frag = params->adev->vm_manager.fragment_size;
1311 	int r;
1312 
	/* system pages are not contiguous */
1314 	if (params->src || !(flags & AMDGPU_PTE_VALID))
1315 		return amdgpu_vm_update_ptes(params, start, end, dst, flags);
1316 
1317 	while (start != end) {
1318 		uint64_t frag_flags, frag_end;
1319 		unsigned frag;
1320 
1321 		/* This intentionally wraps around if no bit is set */
1322 		frag = min((unsigned)ffs(start) - 1,
1323 			   (unsigned)fls64(end - start) - 1);
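		/*
		 * frag is the largest power of two fragment that both the
		 * alignment of start and the remaining size allow.
		 */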
1324 		if (frag >= max_frag) {
1325 			frag_flags = AMDGPU_PTE_FRAG(max_frag);
1326 			frag_end = end & ~((1ULL << max_frag) - 1);
1327 		} else {
1328 			frag_flags = AMDGPU_PTE_FRAG(frag);
1329 			frag_end = start + (1 << frag);
1330 		}
1331 
1332 		r = amdgpu_vm_update_ptes(params, start, frag_end, dst,
1333 					  flags | frag_flags);
1334 		if (r)
1335 			return r;
1336 
1337 		dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE;
1338 		start = frag_end;
1339 	}
1340 
1341 	return 0;
1342 }
1343 
1344 /**
1345  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1346  *
1347  * @adev: amdgpu_device pointer
1348  * @exclusive: fence we need to sync to
1349  * @pages_addr: DMA addresses to use for mapping
1350  * @vm: requested vm
1351  * @start: start of mapped range
1352  * @last: last mapped entry
1353  * @flags: flags for the entries
1354  * @addr: addr to set the area to
1355  * @fence: optional resulting fence
1356  *
1357  * Fill in the page table entries between @start and @last.
1358  *
1359  * Returns:
1360  * 0 for success, -EINVAL for failure.
1361  */
1362 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1363 				       struct dma_fence *exclusive,
1364 				       dma_addr_t *pages_addr,
1365 				       struct amdgpu_vm *vm,
1366 				       uint64_t start, uint64_t last,
1367 				       uint64_t flags, uint64_t addr,
1368 				       struct dma_fence **fence)
1369 {
1370 	struct amdgpu_ring *ring;
1371 	void *owner = AMDGPU_FENCE_OWNER_VM;
1372 	unsigned nptes, ncmds, ndw;
1373 	struct amdgpu_job *job;
1374 	struct amdgpu_pte_update_params params;
1375 	struct dma_fence *f = NULL;
1376 	int r;
1377 
1378 	memset(&params, 0, sizeof(params));
1379 	params.adev = adev;
1380 	params.vm = vm;
1381 
1382 	/* sync to everything on unmapping */
1383 	if (!(flags & AMDGPU_PTE_VALID))
1384 		owner = AMDGPU_FENCE_OWNER_UNDEFINED;
1385 
1386 	if (vm->use_cpu_for_update) {
1387 		/* params.src is used as flag to indicate system Memory */
1388 		if (pages_addr)
1389 			params.src = ~0;
1390 
1391 		/* Wait for PT BOs to be free. PTs share the same resv. object
1392 		 * as the root PD BO
1393 		 */
1394 		r = amdgpu_vm_wait_pd(adev, vm, owner);
1395 		if (unlikely(r))
1396 			return r;
1397 
1398 		params.func = amdgpu_vm_cpu_set_ptes;
1399 		params.pages_addr = pages_addr;
1400 		return amdgpu_vm_frag_ptes(&params, start, last + 1,
1401 					   addr, flags);
1402 	}
1403 
1404 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
1405 
1406 	nptes = last - start + 1;
1407 
	/*
	 * reserve space for two commands every (1 << BLOCK_SIZE)
	 * entries or 2k dwords (whatever is smaller)
	 *
	 * The second command is for the shadow pagetables.
	 */
1414 	if (vm->root.base.bo->shadow)
1415 		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
1416 	else
1417 		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
1418 
1419 	/* padding, etc. */
1420 	ndw = 64;
1421 
1422 	if (pages_addr) {
1423 		/* copy commands needed */
1424 		ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
1425 
1426 		/* and also PTEs */
1427 		ndw += nptes * 2;
1428 
1429 		params.func = amdgpu_vm_do_copy_ptes;
1430 
1431 	} else {
1432 		/* set page commands needed */
1433 		ndw += ncmds * 10;
1434 
1435 		/* extra commands for begin/end fragments */
		if (vm->root.base.bo->shadow)
			ndw += 2 * 10 * adev->vm_manager.fragment_size * 2;
		else
			ndw += 2 * 10 * adev->vm_manager.fragment_size;
1440 
1441 		params.func = amdgpu_vm_do_set_ptes;
1442 	}
1443 
1444 	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1445 	if (r)
1446 		return r;
1447 
1448 	params.ib = &job->ibs[0];
1449 
1450 	if (pages_addr) {
1451 		uint64_t *pte;
1452 		unsigned i;
1453 
1454 		/* Put the PTEs at the end of the IB. */
1455 		i = ndw - nptes * 2;
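		/* Each PTE is 8 bytes, i.e. two dwords of IB space; ib->ptr is
		 * indexed in dwords, hence the i * 4 byte offset below.
		 */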
		pte = (uint64_t *)&(job->ibs->ptr[i]);
1457 		params.src = job->ibs->gpu_addr + i * 4;
1458 
1459 		for (i = 0; i < nptes; ++i) {
1460 			pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
1461 						    AMDGPU_GPU_PAGE_SIZE);
1462 			pte[i] |= flags;
1463 		}
1464 		addr = 0;
1465 	}
1466 
1467 	r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
1468 	if (r)
1469 		goto error_free;
1470 
1471 	r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
1472 			     owner, false);
1473 	if (r)
1474 		goto error_free;
1475 
1476 	r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
1477 	if (r)
1478 		goto error_free;
1479 
1480 	r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
1481 	if (r)
1482 		goto error_free;
1483 
1484 	amdgpu_ring_pad_ib(ring, params.ib);
1485 	WARN_ON(params.ib->length_dw > ndw);
1486 	r = amdgpu_job_submit(job, ring, &vm->entity,
1487 			      AMDGPU_FENCE_OWNER_VM, &f);
1488 	if (r)
1489 		goto error_free;
1490 
1491 	amdgpu_bo_fence(vm->root.base.bo, f, true);
1492 	dma_fence_put(*fence);
1493 	*fence = f;
1494 	return 0;
1495 
1496 error_free:
1497 	amdgpu_job_free(job);
1498 	return r;
1499 }
1500 
1501 /**
1502  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1503  *
1504  * @adev: amdgpu_device pointer
1505  * @exclusive: fence we need to sync to
1506  * @pages_addr: DMA addresses to use for mapping
1507  * @vm: requested vm
1508  * @mapping: mapped range and flags to use for the update
1509  * @flags: HW flags for the mapping
1510  * @nodes: array of drm_mm_nodes with the MC addresses
1511  * @fence: optional resulting fence
1512  *
1513  * Split the mapping into smaller chunks so that each update fits
1514  * into a SDMA IB.
1515  *
1516  * Returns:
1517  * 0 for success, -EINVAL for failure.
1518  */
1519 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1520 				      struct dma_fence *exclusive,
1521 				      dma_addr_t *pages_addr,
1522 				      struct amdgpu_vm *vm,
1523 				      struct amdgpu_bo_va_mapping *mapping,
1524 				      uint64_t flags,
1525 				      struct drm_mm_node *nodes,
1526 				      struct dma_fence **fence)
1527 {
1528 	unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1529 	uint64_t pfn, start = mapping->start;
1530 	int r;
1531 
	/* Normally only the READABLE and WRITEABLE bits of bo_va->flags get
	 * here, but we filter the flags first just in case.
	 */
1535 	if (!(mapping->flags & AMDGPU_PTE_READABLE))
1536 		flags &= ~AMDGPU_PTE_READABLE;
1537 	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1538 		flags &= ~AMDGPU_PTE_WRITEABLE;
1539 
1540 	flags &= ~AMDGPU_PTE_EXECUTABLE;
1541 	flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1542 
1543 	flags &= ~AMDGPU_PTE_MTYPE_MASK;
1544 	flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
1545 
1546 	if ((mapping->flags & AMDGPU_PTE_PRT) &&
1547 	    (adev->asic_type >= CHIP_VEGA10)) {
1548 		flags |= AMDGPU_PTE_PRT;
1549 		flags &= ~AMDGPU_PTE_VALID;
1550 	}
1551 
1552 	trace_amdgpu_vm_bo_update(mapping);
1553 
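	/* Advance to the drm_mm node that contains the first page of the
	 * mapping; pfn becomes the offset into that node.
	 */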
1554 	pfn = mapping->offset >> PAGE_SHIFT;
1555 	if (nodes) {
1556 		while (pfn >= nodes->size) {
1557 			pfn -= nodes->size;
1558 			++nodes;
1559 		}
1560 	}
1561 
1562 	do {
1563 		dma_addr_t *dma_addr = NULL;
1564 		uint64_t max_entries;
1565 		uint64_t addr, last;
1566 
1567 		if (nodes) {
1568 			addr = nodes->start << PAGE_SHIFT;
1569 			max_entries = (nodes->size - pfn) *
1570 				(PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
1571 		} else {
1572 			addr = 0;
1573 			max_entries = S64_MAX;
1574 		}
1575 
1576 		if (pages_addr) {
1577 			uint64_t count;
1578 
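			/*
			 * Check how many pages are physically contiguous so
			 * they can be mapped with a single linear update.
			 */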
1579 			max_entries = min(max_entries, 16ull * 1024ull);
1580 			for (count = 1; count < max_entries; ++count) {
1581 				uint64_t idx = pfn + count;
1582 
1583 				if (pages_addr[idx] !=
1584 				    (pages_addr[idx - 1] + PAGE_SIZE))
1585 					break;
1586 			}
1587 
1588 			if (count < min_linear_pages) {
1589 				addr = pfn << PAGE_SHIFT;
1590 				dma_addr = pages_addr;
1591 			} else {
1592 				addr = pages_addr[pfn];
1593 				max_entries = count;
1594 			}
1595 
1596 		} else if (flags & AMDGPU_PTE_VALID) {
1597 			addr += adev->vm_manager.vram_base_offset;
1598 			addr += pfn << PAGE_SHIFT;
1599 		}
1600 
1601 		last = min((uint64_t)mapping->last, start + max_entries - 1);
1602 		r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
1603 						start, last, flags, addr,
1604 						fence);
1605 		if (r)
1606 			return r;
1607 
1608 		pfn += last - start + 1;
1609 		if (nodes && nodes->size == pfn) {
1610 			pfn = 0;
1611 			++nodes;
1612 		}
1613 		start = last + 1;
1614 
1615 	} while (unlikely(start != mapping->last + 1));
1616 
1617 	return 0;
1618 }
1619 
1620 /**
1621  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1622  *
1623  * @adev: amdgpu_device pointer
1624  * @bo_va: requested BO and VM object
1625  * @clear: if true clear the entries
1626  *
1627  * Fill in the page table entries for @bo_va.
1628  *
1629  * Returns:
1630  * 0 for success, -EINVAL for failure.
1631  */
1632 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1633 			struct amdgpu_bo_va *bo_va,
1634 			bool clear)
1635 {
1636 	struct amdgpu_bo *bo = bo_va->base.bo;
1637 	struct amdgpu_vm *vm = bo_va->base.vm;
1638 	struct amdgpu_bo_va_mapping *mapping;
1639 	dma_addr_t *pages_addr = NULL;
1640 	struct ttm_mem_reg *mem;
1641 	struct drm_mm_node *nodes;
1642 	struct dma_fence *exclusive, **last_update;
1643 	uint64_t flags;
1644 	int r;
1645 
1646 	if (clear || !bo_va->base.bo) {
1647 		mem = NULL;
1648 		nodes = NULL;
1649 		exclusive = NULL;
1650 	} else {
1651 		struct ttm_dma_tt *ttm;
1652 
1653 		mem = &bo_va->base.bo->tbo.mem;
1654 		nodes = mem->mm_node;
1655 		if (mem->mem_type == TTM_PL_TT) {
1656 			ttm = container_of(bo_va->base.bo->tbo.ttm,
1657 					   struct ttm_dma_tt, ttm);
1658 			pages_addr = ttm->dma_address;
1659 		}
1660 		exclusive = reservation_object_get_excl(bo->tbo.resv);
1661 	}
1662 
1663 	if (bo)
1664 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1665 	else
1666 		flags = 0x0;
1667 
1668 	if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
1669 		last_update = &vm->last_update;
1670 	else
1671 		last_update = &bo_va->last_pt_update;
1672 
1673 	if (!clear && bo_va->base.moved) {
1674 		bo_va->base.moved = false;
1675 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1676 
1677 	} else if (bo_va->cleared != clear) {
1678 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1679 	}
1680 
1681 	list_for_each_entry(mapping, &bo_va->invalids, list) {
1682 		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
1683 					       mapping, flags, nodes,
1684 					       last_update);
1685 		if (r)
1686 			return r;
1687 	}
1688 
1689 	if (vm->use_cpu_for_update) {
1690 		/* Flush HDP */
1691 		mb();
1692 		amdgpu_asic_flush_hdp(adev, NULL);
1693 	}
1694 
1695 	spin_lock(&vm->moved_lock);
1696 	list_del_init(&bo_va->base.vm_status);
1697 	spin_unlock(&vm->moved_lock);
1698 
1699 	/* If the BO is not in its preferred location add it back to
1700 	 * the evicted list so that it gets validated again on the
1701 	 * next command submission.
1702 	 */
1703 	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
1704 		uint32_t mem_type = bo->tbo.mem.mem_type;
1705 
1706 		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
1707 			list_add_tail(&bo_va->base.vm_status, &vm->evicted);
1708 		else
1709 			list_add(&bo_va->base.vm_status, &vm->idle);
1710 	}
1711 
1712 	list_splice_init(&bo_va->invalids, &bo_va->valids);
1713 	bo_va->cleared = clear;
1714 
1715 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1716 		list_for_each_entry(mapping, &bo_va->valids, list)
1717 			trace_amdgpu_vm_bo_mapping(mapping);
1718 	}
1719 
1720 	return 0;
1721 }
1722 
1723 /**
1724  * amdgpu_vm_update_prt_state - update the global PRT state
1725  *
1726  * @adev: amdgpu_device pointer
1727  */
1728 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1729 {
1730 	unsigned long flags;
1731 	bool enable;
1732 
1733 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1734 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1735 	adev->gmc.gmc_funcs->set_prt(adev, enable);
1736 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1737 }
1738 
1739 /**
1740  * amdgpu_vm_prt_get - add a PRT user
1741  *
1742  * @adev: amdgpu_device pointer
1743  */
1744 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1745 {
1746 	if (!adev->gmc.gmc_funcs->set_prt)
1747 		return;
1748 
1749 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1750 		amdgpu_vm_update_prt_state(adev);
1751 }
1752 
1753 /**
1754  * amdgpu_vm_prt_put - drop a PRT user
1755  *
1756  * @adev: amdgpu_device pointer
1757  */
1758 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1759 {
1760 	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1761 		amdgpu_vm_update_prt_state(adev);
1762 }
1763 
1764 /**
1765  * amdgpu_vm_prt_cb - callback for updating the PRT status
1766  *
1767  * @fence: fence for the callback
1768  * @_cb: the callback function
1769  */
1770 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1771 {
1772 	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1773 
1774 	amdgpu_vm_prt_put(cb->adev);
1775 	kfree(cb);
1776 }
1777 
1778 /**
1779  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1780  *
1781  * @adev: amdgpu_device pointer
1782  * @fence: fence for the callback
1783  */
1784 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1785 				 struct dma_fence *fence)
1786 {
1787 	struct amdgpu_prt_cb *cb;
1788 
1789 	if (!adev->gmc.gmc_funcs->set_prt)
1790 		return;
1791 
1792 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1793 	if (!cb) {
1794 		/* Last resort when we are OOM */
1795 		if (fence)
1796 			dma_fence_wait(fence, false);
1797 
1798 		amdgpu_vm_prt_put(adev);
1799 	} else {
1800 		cb->adev = adev;
1801 		if (!fence || dma_fence_add_callback(fence, &cb->cb,
1802 						     amdgpu_vm_prt_cb))
1803 			amdgpu_vm_prt_cb(fence, &cb->cb);
1804 	}
1805 }
1806 
1807 /**
1808  * amdgpu_vm_free_mapping - free a mapping
1809  *
1810  * @adev: amdgpu_device pointer
1811  * @vm: requested vm
1812  * @mapping: mapping to be freed
1813  * @fence: fence of the unmap operation
1814  *
1815  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1816  */
1817 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1818 				   struct amdgpu_vm *vm,
1819 				   struct amdgpu_bo_va_mapping *mapping,
1820 				   struct dma_fence *fence)
1821 {
1822 	if (mapping->flags & AMDGPU_PTE_PRT)
1823 		amdgpu_vm_add_prt_cb(adev, fence);
1824 	kfree(mapping);
1825 }
1826 
1827 /**
1828  * amdgpu_vm_prt_fini - finish all prt mappings
1829  *
1830  * @adev: amdgpu_device pointer
1831  * @vm: requested vm
1832  *
1833  * Register a cleanup callback to disable PRT support after VM dies.
1834  */
1835 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1836 {
1837 	struct reservation_object *resv = vm->root.base.bo->tbo.resv;
1838 	struct dma_fence *excl, **shared;
1839 	unsigned i, shared_count;
1840 	int r;
1841 
1842 	r = reservation_object_get_fences_rcu(resv, &excl,
1843 					      &shared_count, &shared);
1844 	if (r) {
1845 		/* Not enough memory to grab the fence list, as last resort
1846 		 * block for all the fences to complete.
1847 		 */
1848 		reservation_object_wait_timeout_rcu(resv, true, false,
1849 						    MAX_SCHEDULE_TIMEOUT);
1850 		return;
1851 	}
1852 
1853 	/* Add a callback for each fence in the reservation object */
1854 	amdgpu_vm_prt_get(adev);
1855 	amdgpu_vm_add_prt_cb(adev, excl);
1856 
1857 	for (i = 0; i < shared_count; ++i) {
1858 		amdgpu_vm_prt_get(adev);
1859 		amdgpu_vm_add_prt_cb(adev, shared[i]);
1860 	}
1861 
1862 	kfree(shared);
1863 }
1864 
1865 /**
1866  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1867  *
1868  * @adev: amdgpu_device pointer
1869  * @vm: requested vm
1870  * @fence: optional resulting fence (unchanged if no work needed to be done
1871  * or if an error occurred)
1872  *
1873  * Make sure all freed BOs are cleared in the PT.
1874  * PTs have to be reserved and mutex must be locked!
1875  *
1876  * Returns:
1877  * 0 for success.
1878  *
1879  */
1880 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1881 			  struct amdgpu_vm *vm,
1882 			  struct dma_fence **fence)
1883 {
1884 	struct amdgpu_bo_va_mapping *mapping;
1885 	uint64_t init_pte_value = 0;
1886 	struct dma_fence *f = NULL;
1887 	int r;
1888 
1889 	while (!list_empty(&vm->freed)) {
1890 		mapping = list_first_entry(&vm->freed,
1891 			struct amdgpu_bo_va_mapping, list);
1892 		list_del(&mapping->list);
1893 
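		/* On ATS capable VMs, mappings below the VA hole are
		 * cleared to the default ATC value instead of zero.
		 */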
1894 		if (vm->pte_support_ats && mapping->start < AMDGPU_VA_HOLE_START)
1895 			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
1896 
1897 		r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
1898 						mapping->start, mapping->last,
1899 						init_pte_value, 0, &f);
1900 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1901 		if (r) {
1902 			dma_fence_put(f);
1903 			return r;
1904 		}
1905 	}
1906 
1907 	if (fence && f) {
1908 		dma_fence_put(*fence);
1909 		*fence = f;
1910 	} else {
1911 		dma_fence_put(f);
1912 	}
1913 
	return 0;
}
1917 
1918 /**
1919  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1920  *
1921  * @adev: amdgpu_device pointer
1922  * @vm: requested vm
1923  *
1924  * Make sure all BOs which are moved are updated in the PTs.
1925  *
1926  * Returns:
1927  * 0 for success.
1928  *
1929  * PTs have to be reserved!
1930  */
1931 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1932 			   struct amdgpu_vm *vm)
1933 {
1934 	struct amdgpu_bo_va *bo_va, *tmp;
1935 	struct list_head moved;
1936 	bool clear;
1937 	int r;
1938 
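	/* Splice the BOs from vm->moved onto a local list under the lock so
	 * they can be processed without holding moved_lock.
	 */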
1939 	INIT_LIST_HEAD(&moved);
1940 	spin_lock(&vm->moved_lock);
1941 	list_splice_init(&vm->moved, &moved);
1942 	spin_unlock(&vm->moved_lock);
1943 
1944 	list_for_each_entry_safe(bo_va, tmp, &moved, base.vm_status) {
1945 		struct reservation_object *resv = bo_va->base.bo->tbo.resv;
1946 
		/* Per VM BOs never need to be cleared in the page tables */
1948 		if (resv == vm->root.base.bo->tbo.resv)
1949 			clear = false;
1950 		/* Try to reserve the BO to avoid clearing its ptes */
1951 		else if (!amdgpu_vm_debug && reservation_object_trylock(resv))
1952 			clear = false;
1953 		/* Somebody else is using the BO right now */
1954 		else
1955 			clear = true;
1956 
1957 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
1958 		if (r) {
1959 			spin_lock(&vm->moved_lock);
1960 			list_splice(&moved, &vm->moved);
1961 			spin_unlock(&vm->moved_lock);
1962 			return r;
1963 		}
1964 
1965 		if (!clear && resv != vm->root.base.bo->tbo.resv)
1966 			reservation_object_unlock(resv);
1967 
1968 	}
1969 
1970 	return 0;
1971 }
1972 
1973 /**
1974  * amdgpu_vm_bo_add - add a bo to a specific vm
1975  *
1976  * @adev: amdgpu_device pointer
1977  * @vm: requested vm
1978  * @bo: amdgpu buffer object
1979  *
 * Add @bo into the requested vm and to the list of BOs associated with it.
1982  *
1983  * Returns:
1984  * Newly added bo_va or NULL for failure
1985  *
1986  * Object has to be reserved!
1987  */
1988 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1989 				      struct amdgpu_vm *vm,
1990 				      struct amdgpu_bo *bo)
1991 {
1992 	struct amdgpu_bo_va *bo_va;
1993 
	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL)
		return NULL;
1998 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1999 
2000 	bo_va->ref_count = 1;
2001 	INIT_LIST_HEAD(&bo_va->valids);
2002 	INIT_LIST_HEAD(&bo_va->invalids);
2003 
2004 	return bo_va;
2005 }
2006 
2008 /**
 * amdgpu_vm_bo_insert_map - insert a new mapping
2010  *
2011  * @adev: amdgpu_device pointer
2012  * @bo_va: bo_va to store the address
2013  * @mapping: the mapping to insert
2014  *
2015  * Insert a new mapping into all structures.
2016  */
2017 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
2018 				    struct amdgpu_bo_va *bo_va,
2019 				    struct amdgpu_bo_va_mapping *mapping)
2020 {
2021 	struct amdgpu_vm *vm = bo_va->base.vm;
2022 	struct amdgpu_bo *bo = bo_va->base.bo;
2023 
2024 	mapping->bo_va = bo_va;
2025 	list_add(&mapping->list, &bo_va->invalids);
2026 	amdgpu_vm_it_insert(mapping, &vm->va);
2027 
2028 	if (mapping->flags & AMDGPU_PTE_PRT)
2029 		amdgpu_vm_prt_get(adev);
2030 
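	/* BOs sharing the root PD reservation are per-VM BOs; queue the bo_va
	 * on the moved list if it isn't already marked as moved.
	 */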
2031 	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
2032 	    !bo_va->base.moved) {
2033 		spin_lock(&vm->moved_lock);
2034 		list_move(&bo_va->base.vm_status, &vm->moved);
2035 		spin_unlock(&vm->moved_lock);
2036 	}
2037 	trace_amdgpu_vm_bo_map(bo_va, mapping);
2038 }
2039 
2040 /**
2041  * amdgpu_vm_bo_map - map bo inside a vm
2042  *
2043  * @adev: amdgpu_device pointer
2044  * @bo_va: bo_va to store the address
2045  * @saddr: where to map the BO
2046  * @offset: requested offset in the BO
2047  * @size: BO size in bytes
2048  * @flags: attributes of pages (read/write/valid/etc.)
2049  *
 * Add a mapping of the BO at the specified addr into the VM.
2051  *
2052  * Returns:
2053  * 0 for success, error for failure.
2054  *
2055  * Object has to be reserved and unreserved outside!
2056  */
2057 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2058 		     struct amdgpu_bo_va *bo_va,
2059 		     uint64_t saddr, uint64_t offset,
2060 		     uint64_t size, uint64_t flags)
2061 {
2062 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2063 	struct amdgpu_bo *bo = bo_va->base.bo;
2064 	struct amdgpu_vm *vm = bo_va->base.vm;
2065 	uint64_t eaddr;
2066 
2067 	/* validate the parameters */
2068 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2069 	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2070 		return -EINVAL;
2071 
2072 	/* make sure object fit at this offset */
2073 	eaddr = saddr + size - 1;
2074 	if (saddr >= eaddr ||
2075 	    (bo && offset + size > amdgpu_bo_size(bo)))
2076 		return -EINVAL;
2077 
2078 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2079 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2080 
2081 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2082 	if (tmp) {
2083 		/* bo and tmp overlap, invalid addr */
2084 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2085 			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
2086 			tmp->start, tmp->last + 1);
2087 		return -EINVAL;
2088 	}
2089 
2090 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2091 	if (!mapping)
2092 		return -ENOMEM;
2093 
2094 	mapping->start = saddr;
2095 	mapping->last = eaddr;
2096 	mapping->offset = offset;
2097 	mapping->flags = flags;
2098 
2099 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2100 
2101 	return 0;
2102 }
2103 
2104 /**
2105  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2106  *
2107  * @adev: amdgpu_device pointer
2108  * @bo_va: bo_va to store the address
2109  * @saddr: where to map the BO
2110  * @offset: requested offset in the BO
2111  * @size: BO size in bytes
2112  * @flags: attributes of pages (read/write/valid/etc.)
2113  *
 * Add a mapping of the BO at the specified addr into the VM. Replace existing
2115  * mappings as we do so.
2116  *
2117  * Returns:
2118  * 0 for success, error for failure.
2119  *
2120  * Object has to be reserved and unreserved outside!
2121  */
2122 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2123 			     struct amdgpu_bo_va *bo_va,
2124 			     uint64_t saddr, uint64_t offset,
2125 			     uint64_t size, uint64_t flags)
2126 {
2127 	struct amdgpu_bo_va_mapping *mapping;
2128 	struct amdgpu_bo *bo = bo_va->base.bo;
2129 	uint64_t eaddr;
2130 	int r;
2131 
2132 	/* validate the parameters */
2133 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2134 	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2135 		return -EINVAL;
2136 
2137 	/* make sure object fit at this offset */
2138 	eaddr = saddr + size - 1;
2139 	if (saddr >= eaddr ||
2140 	    (bo && offset + size > amdgpu_bo_size(bo)))
2141 		return -EINVAL;
2142 
2143 	/* Allocate all the needed memory */
2144 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2145 	if (!mapping)
2146 		return -ENOMEM;
2147 
2148 	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2149 	if (r) {
2150 		kfree(mapping);
2151 		return r;
2152 	}
2153 
2154 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2155 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2156 
2157 	mapping->start = saddr;
2158 	mapping->last = eaddr;
2159 	mapping->offset = offset;
2160 	mapping->flags = flags;
2161 
2162 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2163 
2164 	return 0;
2165 }
2166 
2167 /**
2168  * amdgpu_vm_bo_unmap - remove bo mapping from vm
2169  *
2170  * @adev: amdgpu_device pointer
2171  * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
2173  *
 * Remove a mapping of the BO at the specified addr from the VM.
2175  *
2176  * Returns:
2177  * 0 for success, error for failure.
2178  *
2179  * Object has to be reserved and unreserved outside!
2180  */
2181 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2182 		       struct amdgpu_bo_va *bo_va,
2183 		       uint64_t saddr)
2184 {
2185 	struct amdgpu_bo_va_mapping *mapping;
2186 	struct amdgpu_vm *vm = bo_va->base.vm;
2187 	bool valid = true;
2188 
2189 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2190 
2191 	list_for_each_entry(mapping, &bo_va->valids, list) {
2192 		if (mapping->start == saddr)
2193 			break;
2194 	}
2195 
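	/* The iterator reached the list head, so no valid mapping matched;
	 * search the invalid mappings as well.
	 */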
2196 	if (&mapping->list == &bo_va->valids) {
2197 		valid = false;
2198 
2199 		list_for_each_entry(mapping, &bo_va->invalids, list) {
2200 			if (mapping->start == saddr)
2201 				break;
2202 		}
2203 
2204 		if (&mapping->list == &bo_va->invalids)
2205 			return -ENOENT;
2206 	}
2207 
2208 	list_del(&mapping->list);
2209 	amdgpu_vm_it_remove(mapping, &vm->va);
2210 	mapping->bo_va = NULL;
2211 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2212 
2213 	if (valid)
2214 		list_add(&mapping->list, &vm->freed);
2215 	else
2216 		amdgpu_vm_free_mapping(adev, vm, mapping,
2217 				       bo_va->last_pt_update);
2218 
2219 	return 0;
2220 }
2221 
2222 /**
2223  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2224  *
2225  * @adev: amdgpu_device pointer
2226  * @vm: VM structure to use
2227  * @saddr: start of the range
2228  * @size: size of the range
2229  *
2230  * Remove all mappings in a range, split them as appropriate.
2231  *
2232  * Returns:
2233  * 0 for success, error for failure.
2234  */
2235 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2236 				struct amdgpu_vm *vm,
2237 				uint64_t saddr, uint64_t size)
2238 {
2239 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2240 	LIST_HEAD(removed);
2241 	uint64_t eaddr;
2242 
2243 	eaddr = saddr + size - 1;
2244 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2245 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2246 
2247 	/* Allocate all the needed memory */
2248 	before = kzalloc(sizeof(*before), GFP_KERNEL);
2249 	if (!before)
2250 		return -ENOMEM;
2251 	INIT_LIST_HEAD(&before->list);
2252 
2253 	after = kzalloc(sizeof(*after), GFP_KERNEL);
2254 	if (!after) {
2255 		kfree(before);
2256 		return -ENOMEM;
2257 	}
2258 	INIT_LIST_HEAD(&after->list);
2259 
2260 	/* Now gather all removed mappings */
2261 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2262 	while (tmp) {
2263 		/* Remember mapping split at the start */
2264 		if (tmp->start < saddr) {
2265 			before->start = tmp->start;
2266 			before->last = saddr - 1;
2267 			before->offset = tmp->offset;
2268 			before->flags = tmp->flags;
2269 			before->bo_va = tmp->bo_va;
2270 			list_add(&before->list, &tmp->bo_va->invalids);
2271 		}
2272 
2273 		/* Remember mapping split at the end */
2274 		if (tmp->last > eaddr) {
2275 			after->start = eaddr + 1;
2276 			after->last = tmp->last;
2277 			after->offset = tmp->offset;
2278 			after->offset += after->start - tmp->start;
2279 			after->flags = tmp->flags;
2280 			after->bo_va = tmp->bo_va;
2281 			list_add(&after->list, &tmp->bo_va->invalids);
2282 		}
2283 
2284 		list_del(&tmp->list);
2285 		list_add(&tmp->list, &removed);
2286 
2287 		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2288 	}
2289 
2290 	/* And free them up */
2291 	list_for_each_entry_safe(tmp, next, &removed, list) {
2292 		amdgpu_vm_it_remove(tmp, &vm->va);
2293 		list_del(&tmp->list);
2294 
		if (tmp->start < saddr)
			tmp->start = saddr;
		if (tmp->last > eaddr)
			tmp->last = eaddr;
2299 
2300 		tmp->bo_va = NULL;
2301 		list_add(&tmp->list, &vm->freed);
2302 		trace_amdgpu_vm_bo_unmap(NULL, tmp);
2303 	}
2304 
2305 	/* Insert partial mapping before the range */
2306 	if (!list_empty(&before->list)) {
2307 		amdgpu_vm_it_insert(before, &vm->va);
2308 		if (before->flags & AMDGPU_PTE_PRT)
2309 			amdgpu_vm_prt_get(adev);
2310 	} else {
2311 		kfree(before);
2312 	}
2313 
2314 	/* Insert partial mapping after the range */
2315 	if (!list_empty(&after->list)) {
2316 		amdgpu_vm_it_insert(after, &vm->va);
2317 		if (after->flags & AMDGPU_PTE_PRT)
2318 			amdgpu_vm_prt_get(adev);
2319 	} else {
2320 		kfree(after);
2321 	}
2322 
2323 	return 0;
2324 }
2325 
2326 /**
2327  * amdgpu_vm_bo_lookup_mapping - find mapping by address
2328  *
2329  * @vm: the requested VM
2330  * @addr: the address
2331  *
 * Find a mapping by its address.
2333  *
2334  * Returns:
2335  * The amdgpu_bo_va_mapping matching for addr or NULL
2336  *
2337  */
2338 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2339 							 uint64_t addr)
2340 {
2341 	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2342 }
2343 
2344 /**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2346  *
2347  * @adev: amdgpu_device pointer
2348  * @bo_va: requested bo_va
2349  *
2350  * Remove @bo_va->bo from the requested vm.
2351  *
 * Object has to be reserved!
2353  */
2354 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2355 		      struct amdgpu_bo_va *bo_va)
2356 {
2357 	struct amdgpu_bo_va_mapping *mapping, *next;
2358 	struct amdgpu_vm *vm = bo_va->base.vm;
2359 
2360 	list_del(&bo_va->base.bo_list);
2361 
2362 	spin_lock(&vm->moved_lock);
2363 	list_del(&bo_va->base.vm_status);
2364 	spin_unlock(&vm->moved_lock);
2365 
2366 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2367 		list_del(&mapping->list);
2368 		amdgpu_vm_it_remove(mapping, &vm->va);
2369 		mapping->bo_va = NULL;
2370 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2371 		list_add(&mapping->list, &vm->freed);
2372 	}
2373 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2374 		list_del(&mapping->list);
2375 		amdgpu_vm_it_remove(mapping, &vm->va);
2376 		amdgpu_vm_free_mapping(adev, vm, mapping,
2377 				       bo_va->last_pt_update);
2378 	}
2379 
2380 	dma_fence_put(bo_va->last_pt_update);
2381 	kfree(bo_va);
2382 }
2383 
2384 /**
2385  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2386  *
2387  * @adev: amdgpu_device pointer
2388  * @bo: amdgpu buffer object
2389  * @evicted: is the BO evicted
2390  *
2391  * Mark @bo as invalid.
2392  */
2393 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2394 			     struct amdgpu_bo *bo, bool evicted)
2395 {
2396 	struct amdgpu_vm_bo_base *bo_base;
2397 
2398 	/* shadow bo doesn't have bo base, its validation needs its parent */
2399 	if (bo->parent && bo->parent->shadow == bo)
2400 		bo = bo->parent;
2401 
2402 	list_for_each_entry(bo_base, &bo->va, bo_list) {
2403 		struct amdgpu_vm *vm = bo_base->vm;
2404 		bool was_moved = bo_base->moved;
2405 
2406 		bo_base->moved = true;
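		/* Evicted BOs that share the root reservation go back on the
		 * evicted list; page table BOs at the head, user BOs at the
		 * tail.
		 */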
2407 		if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
2408 			if (bo->tbo.type == ttm_bo_type_kernel)
2409 				list_move(&bo_base->vm_status, &vm->evicted);
2410 			else
2411 				list_move_tail(&bo_base->vm_status,
2412 					       &vm->evicted);
2413 			continue;
2414 		}
2415 
2416 		if (was_moved)
2417 			continue;
2418 
2419 		if (bo->tbo.type == ttm_bo_type_kernel) {
2420 			list_move(&bo_base->vm_status, &vm->relocated);
2421 		} else {
2422 			spin_lock(&bo_base->vm->moved_lock);
2423 			list_move(&bo_base->vm_status, &vm->moved);
2424 			spin_unlock(&bo_base->vm->moved_lock);
2425 		}
2426 	}
2427 }
2428 
2429 /**
2430  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2431  *
2432  * @vm_size: VM size
2433  *
2434  * Returns:
 * VM page table size as a power of two
2436  */
2437 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2438 {
2439 	/* Total bits covered by PD + PTs */
2440 	unsigned bits = ilog2(vm_size) + 18;
2441 
	/* Make sure the PD is 4K in size up to 8GB address space.
	 * Above that split equally between PD and PTs.
	 */
2444 	if (vm_size <= 8)
2445 		return (bits - 9);
2446 	else
2447 		return ((bits + 3) / 2);
2448 }
2449 
2450 /**
2451  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2452  *
2453  * @adev: amdgpu_device pointer
 * @vm_size: the default vm size if it's set to auto
2455  * @fragment_size_default: Default PTE fragment size
2456  * @max_level: max VMPT level
2457  * @max_bits: max address space size in bits
2458  *
2459  */
2460 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
2461 			   uint32_t fragment_size_default, unsigned max_level,
2462 			   unsigned max_bits)
2463 {
2464 	uint64_t tmp;
2465 
2466 	/* adjust vm size first */
2467 	if (amdgpu_vm_size != -1) {
2468 		unsigned max_size = 1 << (max_bits - 30);
2469 
2470 		vm_size = amdgpu_vm_size;
2471 		if (vm_size > max_size) {
2472 			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2473 				 amdgpu_vm_size, max_size);
2474 			vm_size = max_size;
2475 		}
2476 	}
2477 
2478 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2479 
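	/* Determine how many page table levels are needed to cover max_pfn;
	 * each level below the root translates 9 bits of the address.
	 */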
2480 	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2481 	if (amdgpu_vm_block_size != -1)
2482 		tmp >>= amdgpu_vm_block_size - 9;
2483 	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2484 	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2485 	switch (adev->vm_manager.num_level) {
2486 	case 3:
2487 		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2488 		break;
2489 	case 2:
2490 		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2491 		break;
2492 	case 1:
2493 		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2494 		break;
2495 	default:
2496 		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2497 	}
	/* block size depends on vm size and hw setup */
2499 	if (amdgpu_vm_block_size != -1)
2500 		adev->vm_manager.block_size =
2501 			min((unsigned)amdgpu_vm_block_size, max_bits
2502 			    - AMDGPU_GPU_PAGE_SHIFT
2503 			    - 9 * adev->vm_manager.num_level);
2504 	else if (adev->vm_manager.num_level > 1)
2505 		adev->vm_manager.block_size = 9;
2506 	else
2507 		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2508 
2509 	if (amdgpu_vm_fragment_size == -1)
2510 		adev->vm_manager.fragment_size = fragment_size_default;
2511 	else
2512 		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2513 
2514 	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2515 		 vm_size, adev->vm_manager.num_level + 1,
2516 		 adev->vm_manager.block_size,
2517 		 adev->vm_manager.fragment_size);
2518 }
2519 
2520 /**
2521  * amdgpu_vm_init - initialize a vm instance
2522  *
2523  * @adev: amdgpu_device pointer
2524  * @vm: requested vm
 * @vm_context: Indicates if it is a GFX or Compute context
2526  * @pasid: Process address space identifier
2527  *
2528  * Init @vm fields.
2529  *
2530  * Returns:
2531  * 0 for success, error for failure.
2532  */
2533 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2534 		   int vm_context, unsigned int pasid)
2535 {
2536 	struct amdgpu_bo_param bp;
2537 	struct amdgpu_bo *root;
2538 	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
2539 		AMDGPU_VM_PTE_COUNT(adev) * 8);
2540 	unsigned ring_instance;
2541 	struct amdgpu_ring *ring;
2542 	struct drm_sched_rq *rq;
2543 	unsigned long size;
2544 	uint64_t flags;
2545 	int r, i;
2546 
2547 	vm->va = RB_ROOT_CACHED;
2548 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2549 		vm->reserved_vmid[i] = NULL;
2550 	INIT_LIST_HEAD(&vm->evicted);
2551 	INIT_LIST_HEAD(&vm->relocated);
2552 	spin_lock_init(&vm->moved_lock);
2553 	INIT_LIST_HEAD(&vm->moved);
2554 	INIT_LIST_HEAD(&vm->idle);
2555 	INIT_LIST_HEAD(&vm->freed);
2556 
2557 	/* create scheduler entity for page table updates */
2558 
2559 	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
2560 	ring_instance %= adev->vm_manager.vm_pte_num_rings;
2561 	ring = adev->vm_manager.vm_pte_rings[ring_instance];
2562 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
2563 	r = drm_sched_entity_init(&ring->sched, &vm->entity,
2564 				  rq, NULL);
2565 	if (r)
2566 		return r;
2567 
2568 	vm->pte_support_ats = false;
2569 
2570 	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
2571 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2572 						AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2573 
2574 		if (adev->asic_type == CHIP_RAVEN)
2575 			vm->pte_support_ats = true;
2576 	} else {
2577 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2578 						AMDGPU_VM_USE_CPU_FOR_GFX);
2579 	}
2580 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2581 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2583 		  "CPU update of VM recommended only for large BAR system\n");
2584 	vm->last_update = NULL;
2585 
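	/* SDMA updates use a shadow copy of the root PD, while CPU updates
	 * need the BO to be CPU accessible.
	 */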
2586 	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
2587 	if (vm->use_cpu_for_update)
2588 		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
2589 	else
2590 		flags |= AMDGPU_GEM_CREATE_SHADOW;
2591 
2592 	size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
2593 	memset(&bp, 0, sizeof(bp));
2594 	bp.size = size;
2595 	bp.byte_align = align;
2596 	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
2597 	bp.flags = flags;
2598 	bp.type = ttm_bo_type_kernel;
2599 	bp.resv = NULL;
2600 	r = amdgpu_bo_create(adev, &bp, &root);
2601 	if (r)
2602 		goto error_free_sched_entity;
2603 
2604 	r = amdgpu_bo_reserve(root, true);
2605 	if (r)
2606 		goto error_free_root;
2607 
2608 	r = amdgpu_vm_clear_bo(adev, vm, root,
2609 			       adev->vm_manager.root_level,
2610 			       vm->pte_support_ats);
2611 	if (r)
2612 		goto error_unreserve;
2613 
2614 	amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
2615 	amdgpu_bo_unreserve(vm->root.base.bo);
2616 
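	/* Register the VM in the PASID IDR so it can be looked up again,
	 * e.g. by amdgpu_vm_pasid_fault_credit().
	 */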
2617 	if (pasid) {
2618 		unsigned long flags;
2619 
2620 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2621 		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2622 			      GFP_ATOMIC);
2623 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2624 		if (r < 0)
2625 			goto error_free_root;
2626 
2627 		vm->pasid = pasid;
2628 	}
2629 
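	/* Every VM starts with 16 fault credits; amdgpu_vm_pasid_fault_credit()
	 * consumes one credit per fault and rejects faults once they are
	 * used up.
	 */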
2630 	INIT_KFIFO(vm->faults);
2631 	vm->fault_credit = 16;
2632 
2633 	return 0;
2634 
error_unreserve:
	amdgpu_bo_unreserve(root);

error_free_root:
	/* Use the local root pointer, vm->root.base.bo may not be set yet */
	amdgpu_bo_unref(&root->shadow);
	amdgpu_bo_unref(&root);
	vm->root.base.bo = NULL;
2642 
2643 error_free_sched_entity:
2644 	drm_sched_entity_fini(&ring->sched, &vm->entity);
2645 
2646 	return r;
2647 }
2648 
2649 /**
2650  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2651  *
2652  * @adev: amdgpu_device pointer
2653  * @vm: requested vm
2654  *
2655  * This only works on GFX VMs that don't have any BOs added and no
2656  * page tables allocated yet.
2657  *
2658  * Changes the following VM parameters:
2659  * - use_cpu_for_update
2660  * - pte_supports_ats
2661  * - pasid (old PASID is released, because compute manages its own PASIDs)
2662  *
2663  * Reinitializes the page directory to reflect the changed ATS
2664  * setting. May leave behind an unused shadow BO for the page
2665  * directory when switching from SDMA updates to CPU updates.
2666  *
2667  * Returns:
2668  * 0 for success, -errno for errors.
2669  */
2670 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2671 {
2672 	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
2673 	int r;
2674 
2675 	r = amdgpu_bo_reserve(vm->root.base.bo, true);
2676 	if (r)
2677 		return r;
2678 
2679 	/* Sanity checks */
2680 	if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
2681 		r = -EINVAL;
2682 		goto error;
2683 	}
2684 
2685 	/* Check if PD needs to be reinitialized and do it before
2686 	 * changing any other state, in case it fails.
2687 	 */
2688 	if (pte_support_ats != vm->pte_support_ats) {
2689 		r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
2690 			       adev->vm_manager.root_level,
2691 			       pte_support_ats);
2692 		if (r)
2693 			goto error;
2694 	}
2695 
2696 	/* Update VM state */
2697 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2698 				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2699 	vm->pte_support_ats = pte_support_ats;
2700 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2701 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2703 		  "CPU update of VM recommended only for large BAR system\n");
2704 
2705 	if (vm->pasid) {
2706 		unsigned long flags;
2707 
2708 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2709 		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2710 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2711 
2712 		vm->pasid = 0;
2713 	}
2714 
2715 error:
2716 	amdgpu_bo_unreserve(vm->root.base.bo);
2717 	return r;
2718 }
2719 
2720 /**
2721  * amdgpu_vm_free_levels - free PD/PT levels
2722  *
2723  * @adev: amdgpu device structure
2724  * @parent: PD/PT starting level to free
2725  * @level: level of parent structure
2726  *
2727  * Free the page directory or page table level and all sub levels.
2728  */
2729 static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
2730 				  struct amdgpu_vm_pt *parent,
2731 				  unsigned level)
2732 {
2733 	unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);
2734 
2735 	if (parent->base.bo) {
2736 		list_del(&parent->base.bo_list);
2737 		list_del(&parent->base.vm_status);
2738 		amdgpu_bo_unref(&parent->base.bo->shadow);
2739 		amdgpu_bo_unref(&parent->base.bo);
2740 	}
2741 
2742 	if (parent->entries)
2743 		for (i = 0; i < num_entries; i++)
2744 			amdgpu_vm_free_levels(adev, &parent->entries[i],
2745 					      level + 1);
2746 
2747 	kvfree(parent->entries);
2748 }
2749 
2750 /**
2751  * amdgpu_vm_fini - tear down a vm instance
2752  *
2753  * @adev: amdgpu_device pointer
2754  * @vm: requested vm
2755  *
2756  * Tear down @vm.
 * Unbind the VM and remove all BOs from the vm bo list.
2758  */
2759 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2760 {
2761 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2762 	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2763 	struct amdgpu_bo *root;
2764 	u64 fault;
2765 	int i, r;
2766 
2767 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2768 
2769 	/* Clear pending page faults from IH when the VM is destroyed */
2770 	while (kfifo_get(&vm->faults, &fault))
2771 		amdgpu_ih_clear_fault(adev, fault);
2772 
2773 	if (vm->pasid) {
2774 		unsigned long flags;
2775 
2776 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2777 		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2778 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2779 	}
2780 
2781 	drm_sched_entity_fini(vm->entity.sched, &vm->entity);
2782 
	if (!RB_EMPTY_ROOT(&vm->va.rb_root))
		dev_err(adev->dev, "still active bo inside vm\n");
2786 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2787 					     &vm->va.rb_root, rb) {
2788 		list_del(&mapping->list);
2789 		amdgpu_vm_it_remove(mapping, &vm->va);
2790 		kfree(mapping);
2791 	}
2792 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2793 		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2794 			amdgpu_vm_prt_fini(adev, vm);
2795 			prt_fini_needed = false;
2796 		}
2797 
2798 		list_del(&mapping->list);
2799 		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2800 	}
2801 
2802 	root = amdgpu_bo_ref(vm->root.base.bo);
2803 	r = amdgpu_bo_reserve(root, true);
2804 	if (r) {
2805 		dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
2806 	} else {
2807 		amdgpu_vm_free_levels(adev, &vm->root,
2808 				      adev->vm_manager.root_level);
2809 		amdgpu_bo_unreserve(root);
2810 	}
2811 	amdgpu_bo_unref(&root);
2812 	dma_fence_put(vm->last_update);
2813 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2814 		amdgpu_vmid_free_reserved(adev, vm, i);
2815 }
2816 
2817 /**
2818  * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
2819  *
2820  * @adev: amdgpu_device pointer
 * @pasid: PASID to identify the VM
2822  *
2823  * This function is expected to be called in interrupt context.
2824  *
2825  * Returns:
2826  * True if there was fault credit, false otherwise
2827  */
2828 bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
2829 				  unsigned int pasid)
2830 {
2831 	struct amdgpu_vm *vm;
2832 
2833 	spin_lock(&adev->vm_manager.pasid_lock);
2834 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
2835 	if (!vm) {
2836 		/* VM not found, can't track fault credit */
2837 		spin_unlock(&adev->vm_manager.pasid_lock);
2838 		return true;
2839 	}
2840 
	/* No lock needed, only accessed by the IRQ handler */
2842 	if (!vm->fault_credit) {
2843 		/* Too many faults in this VM */
2844 		spin_unlock(&adev->vm_manager.pasid_lock);
2845 		return false;
2846 	}
2847 
2848 	vm->fault_credit--;
2849 	spin_unlock(&adev->vm_manager.pasid_lock);
2850 	return true;
2851 }
2852 
2853 /**
2854  * amdgpu_vm_manager_init - init the VM manager
2855  *
2856  * @adev: amdgpu_device pointer
2857  *
2858  * Initialize the VM manager structures
2859  */
2860 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2861 {
2862 	unsigned i;
2863 
2864 	amdgpu_vmid_mgr_init(adev);
2865 
2866 	adev->vm_manager.fence_context =
2867 		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2868 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2869 		adev->vm_manager.seqno[i] = 0;
2870 
2871 	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
2872 	spin_lock_init(&adev->vm_manager.prt_lock);
2873 	atomic_set(&adev->vm_manager.num_prt_users, 0);
2874 
	/* Unless overridden by the user, compute VM page tables are only
	 * updated by the CPU on large BAR systems by default.
	 */
2878 #ifdef CONFIG_X86_64
2879 	if (amdgpu_vm_update_mode == -1) {
2880 		if (amdgpu_gmc_vram_full_visible(&adev->gmc))
2881 			adev->vm_manager.vm_update_mode =
2882 				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2883 		else
2884 			adev->vm_manager.vm_update_mode = 0;
2885 	} else
2886 		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2887 #else
2888 	adev->vm_manager.vm_update_mode = 0;
2889 #endif
2890 
2891 	idr_init(&adev->vm_manager.pasid_idr);
2892 	spin_lock_init(&adev->vm_manager.pasid_lock);
2893 }
2894 
2895 /**
2896  * amdgpu_vm_manager_fini - cleanup VM manager
2897  *
2898  * @adev: amdgpu_device pointer
2899  *
2900  * Cleanup the VM manager and free resources.
2901  */
2902 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2903 {
2904 	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
2905 	idr_destroy(&adev->vm_manager.pasid_idr);
2906 
2907 	amdgpu_vmid_mgr_fini(adev);
2908 }
2909 
2910 /**
2911  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2912  *
2913  * @dev: drm device pointer
2914  * @data: drm_amdgpu_vm
2915  * @filp: drm file pointer
2916  *
2917  * Returns:
2918  * 0 for success, -errno for errors.
2919  */
2920 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2921 {
2922 	union drm_amdgpu_vm *args = data;
2923 	struct amdgpu_device *adev = dev->dev_private;
2924 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
2925 	int r;
2926 
2927 	switch (args->in.op) {
2928 	case AMDGPU_VM_OP_RESERVE_VMID:
		/* currently, we only have the requirement to reserve a VMID from the gfxhub */
2930 		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
2931 		if (r)
2932 			return r;
2933 		break;
2934 	case AMDGPU_VM_OP_UNRESERVE_VMID:
2935 		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
2936 		break;
2937 	default:
2938 		return -EINVAL;
2939 	}
2940 
2941 	return 0;
2942 }
2943