/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <[email protected]>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

/**
 * DOC: amdgpu_object
 *
 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
 * represents memory used by the driver (VRAM, system memory, etc.). The
 * driver provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these
 * interfaces to create/destroy/set buffer objects which are then managed by
 * the kernel TTM memory manager.
 * The interfaces are also used internally by kernel clients, including gfx,
 * uvd, etc. for kernel managed allocations used by the GPU.
 */
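
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * kernel client allocates a pinned, CPU-visible BO, uses it and frees it
 * again; the "adev" pointer and the chosen domain are assumptions.
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				    &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *	// ... use gpu_addr on the GPU, cpu_ptr on the CPU ...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */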

static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
	struct amdgpu_bo_user *ubo;

	amdgpu_bo_kunmap(bo);

	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);
	/* in case amdgpu_device_recover_vram got a NULL bo->parent */
	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	amdgpu_bo_unref(&bo->parent);

	if (bo->tbo.type == ttm_bo_type_device) {
		ubo = to_amdgpu_bo_user(bo);
		kfree(ubo->metadata);
	}

	kfree(bo);
}

/**
 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses the destroy function associated with the object to determine if this
 * is an &amdgpu_bo.
 *
 * Returns:
 * true if the object is an &amdgpu_bo, false if not.
 */
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	return bo->destroy == &amdgpu_bo_destroy;
}

/**
 * amdgpu_bo_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to requested domain and the buffer's
 * flags.
 */
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_VRAM;
		places[c].flags = 0;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_TT;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_SYSTEM;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_GDS;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_GWS;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_OA;
		places[c].flags = 0;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_SYSTEM;
		places[c].flags = 0;
		c++;
	}

	BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}
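
/*
 * Example (illustrative sketch, not part of the original file): callers
 * typically pair this helper with ttm_bo_validate() to actually move the
 * buffer into the requested domain; amdgpu_bo_validate() below follows
 * this pattern:
 *
 *	struct ttm_operation_ctx ctx = { false, false };
 *
 *	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
 *	r = ttm_bo_validate(&abo->tbo, &abo->placement, &ctx);
 */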

/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: a new BO is only created if @bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	if (!size) {
		amdgpu_bo_unref(bo_ptr);
		return 0;
	}

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
		: AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
		goto error_unpin;
	}

	if (gpu_addr)
		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unpin;
		}
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}
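
/*
 * Example (illustrative sketch, not part of the original file): the BO is
 * returned still reserved, so the caller can finish setup before anyone
 * else can touch it, e.g. clearing it through the CPU mapping:
 *
 *	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
 *				      AMDGPU_GEM_DOMAIN_GTT, &bo,
 *				      &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *	memset(cpu_ptr, 0, size);
 *	amdgpu_bo_unreserve(bo);
 */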

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: a new BO is only created if @bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);

	if (r)
		return r;

	if (*bo_ptr)
		amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}

/**
 * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
 *
 * @adev: amdgpu device object
 * @offset: offset of the BO
 * @size: size of the BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @cpu_addr: optional CPU address mapping
 *
 * Creates a kernel BO at a specific offset in the address space of the domain.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size, uint32_t domain,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	unsigned int i;
	int r;

	offset &= PAGE_MASK;
	size = ALIGN(size, PAGE_SIZE);

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
				      NULL, cpu_addr);
	if (r)
		return r;

	if ((*bo_ptr) == NULL)
		return 0;

	/*
	 * Remove the original mem node and create a new one at the requested
	 * position.
	 */
	if (cpu_addr)
		amdgpu_bo_kunmap(*bo_ptr);

	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);

	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
	}
	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
			     &(*bo_ptr)->tbo.mem, &ctx);
	if (r)
		goto error;

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r)
			goto error;
	}

	amdgpu_bo_unreserve(*bo_ptr);
	return 0;

error:
	amdgpu_bo_unreserve(*bo_ptr);
	amdgpu_bo_unref(bo_ptr);
	return r;
}
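
/*
 * Example (illustrative sketch, not part of the original file): reserving
 * a fixed, pre-defined VRAM range, e.g. a region still scanned out by the
 * firmware framebuffer; the offset and size are made-up values.
 *
 *	r = amdgpu_bo_create_kernel_at(adev, 0x100000, 0x10000,
 *				       AMDGPU_GEM_DOMAIN_VRAM,
 *				       &bo, NULL);
 */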

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}

/* Validate that the BO size fits in the requested domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
					  unsigned long size, u32 domain)
{
	struct ttm_resource_manager *man = NULL;

	/*
	 * If GTT is part of the requested domains the check must succeed to
	 * allow falling back to GTT.
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
	return true;

fail:
	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
		  man->size << PAGE_SHIFT);
	return false;
}

bool amdgpu_bo_support_uswc(u64 bo_flags)
{
#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	return false;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	return false;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		return false;

	return true;
#endif
}

static int amdgpu_bo_do_create(struct amdgpu_device *adev,
			       struct amdgpu_bo_param *bp,
			       struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = bp->no_wait_gpu,
		/* We opt to avoid OOM on system page allocations */
		.gfp_retry_mayfail = true,
		.allow_res_evict = bp->type != ttm_bo_type_kernel,
		.resv = bp->resv
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	int r;

	/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		/* GWS and OA don't need any alignment. */
		page_align = bp->byte_align;
		size <<= PAGE_SHIFT;
	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
		/* Both size and alignment must be a multiple of 4. */
		page_align = ALIGN(bp->byte_align, 4);
		size = ALIGN(size, 4) << PAGE_SHIFT;
	} else {
		/* Memory should be aligned at least to a page size. */
		page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
		size = ALIGN(size, PAGE_SIZE);
	}

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));

	*bo_ptr = NULL;
	bo = kzalloc(bp->bo_ptr_size, GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
	INIT_LIST_HEAD(&bo->shadow_list);
	bo->vm_bo = NULL;
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

	if (!amdgpu_bo_support_uswc(bo->flags))
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	bo->tbo.bdev = &adev->mman.bdev;
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
			  AMDGPU_GEM_DOMAIN_GDS))
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	else
		amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
				 &bo->placement, page_align, &ctx, NULL,
				 bp->resv, &amdgpu_bo_destroy);
	if (unlikely(r != 0))
		return r;

	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		amdgpu_bo_fence(bo, fence, false);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		dma_resv_unlock(bo->tbo.base.resv);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size,
				   struct amdgpu_bo *bo)
{
	struct amdgpu_bo_param bp;
	int r;

	if (bo->shadow)
		return 0;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		AMDGPU_GEM_CREATE_SHADOW;
	bp.type = ttm_bo_type_kernel;
	bp.resv = bo->tbo.base.resv;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	return r;
}

/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object and, if requested, also creates a
 * shadow object.
 * The shadow object is used to back up the original buffer object, and is
 * always placed in GTT.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr)
{
	u64 flags = bp->flags;
	int r;

	bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;

	r = amdgpu_bo_do_create(adev, bp, bo_ptr);
	if (r)
		return r;

	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
		if (!bp->resv)
			WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv,
							NULL));

		r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);

		if (!bp->resv)
			dma_resv_unlock((*bo_ptr)->tbo.base.resv);

		if (r)
			amdgpu_bo_unref(bo_ptr);
	}

	return r;
}
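
/*
 * Example (illustrative sketch, not part of the original file): filling
 * &amdgpu_bo_param by hand for a plain, contiguous VRAM allocation; the
 * concrete values are assumptions.
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = size;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
 *	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;
 *	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 *
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */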

/**
 * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @ubo_ptr: pointer to the buffer object pointer
 *
 * Creates a BO to be used by a user application.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create_user(struct amdgpu_device *adev,
			  struct amdgpu_bo_param *bp,
			  struct amdgpu_bo_user **ubo_ptr)
{
	struct amdgpu_bo *bo_ptr;
	int r;

	bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
	bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
	r = amdgpu_bo_do_create(adev, bp, &bo_ptr);
	if (r)
		return r;

	*ubo_ptr = to_amdgpu_bo_user(bo_ptr);
	return r;
}
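
/*
 * Example (illustrative sketch, not part of the original file): the
 * returned &amdgpu_bo_user embeds the base BO, so the base object is
 * reached through the embedded member (assumed to be named "bo" here):
 *
 *	struct amdgpu_bo_user *ubo;
 *
 *	r = amdgpu_bo_create_user(adev, &bp, &ubo);
 *	if (!r)
 *		bo = &ubo->bo;
 */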

/**
 * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
 * @bo: pointer to the buffer object
 *
 * Sets placement according to domain and changes the placement and caching
 * policy of the buffer object according to the placement.
 * This is used for validating shadow BOs. It calls ttm_bo_validate() to
 * make sure the buffer is resident where it needs to be.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	uint32_t domain;
	int r;

	if (bo->tbo.pin_count)
		return 0;

	domain = bo->preferred_domains;

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

/**
 * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
 *
 * @shadow: &amdgpu_bo shadow to be restored
 * @fence: dma_fence associated with the operation
 *
 * Copies a buffer object's shadow content back to the object.
 * This is used for recovering a buffer from its shadow in case of a gpu
 * reset where vram context may be lost.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t shadow_addr, parent_addr;

	shadow_addr = amdgpu_bo_gpu_offset(shadow);
	parent_addr = amdgpu_bo_gpu_offset(shadow->parent);

	return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
				  amdgpu_bo_size(shadow), NULL, fence,
				  true, false, false);
}

/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}
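
/*
 * Example (illustrative sketch, not part of the original file): CPU access
 * through a kernel mapping; the BO must be reserved around the map/unmap:
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_kmap(bo, &cpu_ptr);
 *	if (!r) {
 *		memcpy(cpu_ptr, data, size);
 *		amdgpu_bo_kunmap(bo);
 *	}
 *	amdgpu_bo_unreserve(bo);
 */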

/**
 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address.
 *
 * Returns:
 * the virtual address of a buffer object area.
 */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}

/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * References the contained &ttm_buffer_object.
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object.
 */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}

/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Unreferences the contained &ttm_buffer_object and clears the pointer.
 */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}

/**
 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @min_offset: the start of requested address range
 * @max_offset: the end of requested address range
 *
 * Pins the buffer object according to requested domain and address range. If
 * the memory is unbound GART memory, binds the pages into the GART table.
 * Adjusts pin_count and pin_size accordingly.
 *
 * Pinning means to lock pages in memory along with keeping them at a fixed
 * offset. It is required when a buffer cannot be moved, for example, when
 * a display buffer is being scanned out.
 *
 * Compared with amdgpu_bo_pin(), this function gives more flexibility on
 * where to pin a buffer if there are specific restrictions on where a buffer
 * must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->prime_shared_count || bo->tbo.base.import_attach) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
	 * See function amdgpu_display_supported_domains()
	 */
	domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);

	if (bo->tbo.pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;
		uint32_t mem_flags = bo->tbo.mem.placement;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		if ((mem_type == TTM_PL_VRAM) &&
		    (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
		    !(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
			return -EINVAL;

		ttm_bo_pin(&bo->tbo);

		if (max_offset != 0) {
			u64 domain_start = amdgpu_ttm_domain_start(adev,
								   mem_type);
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	if (bo->tbo.base.import_attach)
		dma_buf_pin(bo->tbo.base.import_attach);

	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned fpfn, lpfn;

		fpfn = min_offset >> PAGE_SHIFT;
		lpfn = max_offset >> PAGE_SHIFT;

		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	ttm_bo_pin(&bo->tbo);

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}

error:
	return r;
}
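
/*
 * Example (illustrative sketch, not part of the original file): pinning a
 * buffer into the first 256 MiB of VRAM; the range is a made-up
 * restriction for illustration.
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_VRAM,
 *				     0, 256ULL << 20);
 *	amdgpu_bo_unreserve(bo);
 */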

/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 *
 * A simple wrapper to amdgpu_bo_pin_restricted().
 * Provides a simpler API for buffers that do not have any strict restrictions
 * on where a buffer must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}

/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
 * Changes placement and pin size accordingly.
 */
void amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	ttm_bo_unpin(&bo->tbo);
	if (bo->tbo.pin_count)
		return;

	if (bo->tbo.base.import_attach)
		dma_buf_unpin(bo->tbo.base.import_attach);

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}
}

/**
 * amdgpu_bo_evict_vram - evict VRAM buffers
 * @adev: amdgpu device object
 *
 * Evicts all VRAM buffers on the lru list of the memory type.
 * Mainly used for evicting vram at suspend time.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	struct ttm_resource_manager *man;

	if (adev->in_s3 && (adev->flags & AMD_IS_APU)) {
		/* No need to evict vram on APUs for suspend to ram */
		return 0;
	}

	man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
	return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
	"GDDR6",
	"DDR5"
};

/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* On A+A platform, VRAM can be mapped as WB */
	if (!adev->gmc.xgmi.connected_to_cpu) {
		/* reserve PAT memory space to WC for VRAM */
		arch_io_reserve_memtype_wc(adev->gmc.aper_base,
				adev->gmc.aper_size);

		/* Add an MTRR for the VRAM */
		adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
				adev->gmc.aper_size);
	}

	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}

/**
 * amdgpu_bo_fini - tear down memory manager
 * @adev: amdgpu device object
 *
 * Reverses amdgpu_bo_init() to tear down memory manager.
 */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	if (!adev->gmc.xgmi.connected_to_cpu) {
		arch_phys_wc_del(adev->gmc.vram_mtrr);
		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
	}
}

/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: new flags
 *
 * Sets the buffer object's tiling flags to the new value. Used by GEM ioctl
 * or kernel driver to set the tiling flags on a buffer.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_bo_user *ubo;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	ubo = to_amdgpu_bo_user(bo);
	ubo->tiling_flags = tiling_flags;
	return 0;
}

/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: returned flags
 *
 * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
 * get the tiling flags on a buffer.
 */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	struct amdgpu_bo_user *ubo;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	dma_resv_assert_held(bo->tbo.base.resv);
	ubo = to_amdgpu_bo_user(bo);

	if (tiling_flags)
		*tiling_flags = ubo->tiling_flags;
}

/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata
 * @flags: flags of the new metadata
 *
 * Sets buffer object's metadata, its size and flags.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	struct amdgpu_bo_user *ubo;
	void *buffer;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	ubo = to_amdgpu_bo_user(bo);
	if (!metadata_size) {
		if (ubo->metadata_size) {
			kfree(ubo->metadata);
			ubo->metadata = NULL;
			ubo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(ubo->metadata);
	ubo->metadata_flags = flags;
	ubo->metadata = buffer;
	ubo->metadata_size = metadata_size;

	return 0;
}

/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata
 * @buffer_size: size of the buffer
 * @metadata_size: size of the returned metadata
 * @flags: flags of the returned metadata
 *
 * Gets buffer object's metadata, its size and flags. buffer_size shall not be
 * less than metadata_size.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	struct amdgpu_bo_user *ubo;

	if (!buffer && !metadata_size)
		return -EINVAL;

	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
	ubo = to_amdgpu_bo_user(bo);
	if (buffer) {
		if (buffer_size < ubo->metadata_size)
			return -EINVAL;

		if (ubo->metadata_size)
			memcpy(buffer, ubo->metadata, ubo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = ubo->metadata_size;
	if (flags)
		*flags = ubo->metadata_flags;

	return 0;
}
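
/*
 * Example (illustrative sketch, not part of the original file): the usual
 * two-call pattern, querying the metadata size first and then fetching
 * the data:
 *
 *	uint32_t size;
 *	uint64_t flags;
 *
 *	r = amdgpu_bo_get_metadata(bo, NULL, 0, &size, &flags);
 *	if (!r && size) {
 *		buffer = kmalloc(size, GFP_KERNEL);
 *		if (buffer)
 *			r = amdgpu_bo_get_metadata(bo, buffer, size,
 *						   &size, &flags);
 *	}
 */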

/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to a buffer object
 * @evict: if this move is evicting the buffer from the graphics address space
 * @new_mem: new information of the buffer object
 *
 * Marks the corresponding &amdgpu_bo buffer object as invalid; also performs
 * bookkeeping.
 * TTM driver callback which is called when ttm moves a buffer.
 */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_resource *old_mem = &bo->mem;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);
	amdgpu_vm_bo_invalidate(adev, abo, evict);

	amdgpu_bo_kunmap(abo);

	if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
	    bo->mem.mem_type != TTM_PL_SYSTEM)
		dma_buf_move_notify(abo->tbo.base.dma_buf);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
				uint64_t *gtt_mem, uint64_t *cpu_mem)
{
	unsigned int domain;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		*vram_mem += amdgpu_bo_size(bo);
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		*gtt_mem += amdgpu_bo_size(bo);
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		*cpu_mem += amdgpu_bo_size(bo);
		break;
	}
}

/**
 * amdgpu_bo_release_notify - notification about a BO being released
 * @bo: pointer to a buffer object
 *
 * Wipes VRAM buffers whose contents should not be leaked before the
 * memory is released.
 */
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *abo;
	int r;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);

	if (abo->kfd_bo)
		amdgpu_amdkfd_unreserve_memory_limit(abo);

	/* We only remove the fence if the resv has been individualized. */
	WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
			&& bo->base.resv != &bo->base._resv);
	if (bo->base.resv == &bo->base._resv)
		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);

	if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
		return;

	dma_resv_lock(bo->base.resv, NULL);

	r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
	if (!WARN_ON(r)) {
		amdgpu_bo_fence(abo, fence, false);
		dma_fence_put(fence);
	}

	dma_resv_unlock(bo->base.resv);
}

/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to a buffer object
 *
 * Notifies the driver that we are taking a fault on this BO and have
 * reserved it; also performs bookkeeping.
 * TTM driver callback for dealing with vm faults.
 *
 * Returns:
 * 0 for success or a VM_FAULT_ code on failure.
 */
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	unsigned long offset;
	int r;

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + bo->base.size) <= adev->gmc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->tbo.pin_count > 0)
		return VM_FAULT_SIGBUS;

	/* hurrah the memory is not visible! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placement.num_busy_placement = 1;
	abo->placement.busy_placement = &abo->placements[1];

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
		return VM_FAULT_NOPAGE;
	else if (unlikely(r))
		return VM_FAULT_SIGBUS;

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    (offset + bo->base.size) > adev->gmc.visible_vram_size)
		return VM_FAULT_SIGBUS;

	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct dma_resv *resv = bo->tbo.base.resv;

	if (shared)
		dma_resv_add_shared_fence(resv, fence);
	else
		dma_resv_add_excl_fence(resv, fence);
}

/**
 * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
 *
 * @adev: amdgpu device pointer
 * @resv: reservation object to sync to
 * @sync_mode: synchronization mode
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Extracts the fences from the reservation object and waits for them to
 * finish.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr)
{
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
	r = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return r;
}
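
/*
 * Example (illustrative sketch, not part of the original file): waiting
 * for all fences on a BO before touching it from the CPU, using the
 * wrapper below:
 *
 *	r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_UNDEFINED, false);
 */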

/**
 * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
 * @bo: buffer object to wait for
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Wrapper to wait for fences in a BO.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
					AMDGPU_SYNC_NE_OWNER, owner, intr);
}

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Note: object should either be pinned or reserved when calling this
 * function; it might be useful to add a check for this for debugging.
 *
 * Returns:
 * current GPU offset of the object.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
		     !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return amdgpu_bo_gpu_offset_no_check(bo);
}

/**
 * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns:
 * current GPU offset of the object without raising warnings.
 */
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint64_t offset;

	offset = (bo->tbo.mem.start << PAGE_SHIFT) +
		 amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);

	return amdgpu_gmc_sign_extend(offset);
}

/**
 * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
 * @adev: amdgpu device object
 * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
 *
 * Returns:
 * Which of the allowed domains is preferred for pinning the BO for scanout.
 */
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain)
{
	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
			domain = AMDGPU_GEM_DOMAIN_GTT;
	}
	return domain;
}

#if defined(CONFIG_DEBUG_FS)
#define amdgpu_bo_print_flag(m, bo, flag)			\
	do {							\
		if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
			seq_printf((m), " " #flag);		\
		}						\
	} while (0)

/**
 * amdgpu_bo_print_info - print BO info in debugfs file
 *
 * @id: Index or Id of the BO
 * @bo: Requested BO for printing info
 * @m: debugfs file
 *
 * Print BO information in debugfs file
 *
 * Returns:
 * Size of the BO in bytes.
 */
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
{
	struct dma_buf_attachment *attachment;
	struct dma_buf *dma_buf;
	unsigned int domain;
	const char *placement;
	unsigned int pin_count;
	u64 size;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}

	size = amdgpu_bo_size(bo);
	seq_printf(m, "\t\t0x%08x: %12lld byte %s",
			id, size, placement);

	pin_count = READ_ONCE(bo->tbo.pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);

	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
	attachment = READ_ONCE(bo->tbo.base.import_attach);

	if (attachment)
		seq_printf(m, " imported from %p", dma_buf);
	else if (dma_buf)
		seq_printf(m, " exported as %p", dma_buf);

	amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
	amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
	amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC);
	amdgpu_bo_print_flag(m, bo, VRAM_CLEARED);
	amdgpu_bo_print_flag(m, bo, SHADOW);
	amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
	amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
	amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);

	seq_puts(m, "\n");

	return size;
}
#endif