/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <[email protected]>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

/**
 * DOC: amdgpu_object
 *
 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
 * represents memory used by the driver (VRAM, system memory, etc.). The driver
 * provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these interfaces
 * to create/destroy/set buffer objects, which are then managed by the kernel
 * TTM memory manager.
 * The interfaces are also used internally by kernel clients, including gfx,
 * uvd, etc., for kernel-managed allocations used by the GPU.
 *
 */

static bool amdgpu_need_backup(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return false;

	if (amdgpu_gpu_recovery == 0 ||
	    (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))
		return false;

	return true;
}

static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	if (bo->kfd_bo)
		amdgpu_amdkfd_unreserve_system_memory_limit(bo);

	amdgpu_bo_kunmap(bo);

	if (bo->gem_base.import_attach)
		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	kfree(bo->metadata);
	kfree(bo);
}

/**
 * amdgpu_ttm_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses the destroy function associated with the object to determine if this
 * is an &amdgpu_bo.
 *
 * Returns:
 * true if the object belongs to &amdgpu_bo, false if not.
 */
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_ttm_bo_destroy)
		return true;
	return false;
}

/**
 * amdgpu_ttm_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to requested domain and the buffer's
 * flags.
 */
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		if (flags & AMDGPU_GEM_CREATE_SHADOW)
			places[c].lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
		else
			places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_TT;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_SYSTEM;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
		c++;
	}

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}
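
/*
 * Illustrative example (not part of the original code): a caller whose
 * buffer may live in either VRAM or GTT requests both domains at once; the
 * resulting placement list is tried by TTM in the order built above, VRAM
 * first. The variable name "abo" is hypothetical.
 *
 *	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
 *					      AMDGPU_GEM_DOMAIN_GTT);
 */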

/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: a new BO is created only if *bo_ptr is NULL; otherwise the existing
 * BO is reused.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unreserve;
		}
	}

	return 0;

error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: a new BO is created only if *bo_ptr is NULL; otherwise the existing
 * BO is reused.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);

	if (r)
		return r;

	amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}
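
/*
 * Illustrative example (not part of the original code): allocating a
 * page-sized, pinned and CPU-mapped scratch buffer in GTT for kernel use,
 * and releasing it again. The names "scratch_bo", "scratch_gpu_addr" and
 * "scratch_cpu_ptr" are hypothetical.
 *
 *	struct amdgpu_bo *scratch_bo = NULL;
 *	u64 scratch_gpu_addr;
 *	void *scratch_cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &scratch_bo,
 *				    &scratch_gpu_addr, &scratch_cpu_ptr);
 *	if (r)
 *		return r;
 *
 *	... use the buffer ...
 *
 *	amdgpu_bo_free_kernel(&scratch_bo, &scratch_gpu_addr,
 *			      &scratch_cpu_ptr);
 */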

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}

/* Validate that the requested BO size fits within the total memory of the
 * requested domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
					  unsigned long size, u32 domain)
{
	struct ttm_mem_type_manager *man = NULL;

	/*
	 * If GTT is part of the requested domains, the check must succeed to
	 * allow falling back to GTT
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		man = &adev->mman.bdev.man[TTM_PL_TT];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		man = &adev->mman.bdev.man[TTM_PL_VRAM];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
	return true;

fail:
	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
		  man->size << PAGE_SHIFT);
	return false;
}

static int amdgpu_bo_do_create(struct amdgpu_device *adev,
			       struct amdgpu_bo_param *bp,
			       struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = false,
		.resv = bp->resv,
		.flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	size_t acc_size;
	int r;

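	/* TTM expects the alignment in pages, so convert the byte alignment
	 * to pages and round the size up to a whole number of pages.
	 */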
	page_align = roundup(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size = ALIGN(size, PAGE_SIZE);

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
	INIT_LIST_HEAD(&bo->shadow_list);
	INIT_LIST_HEAD(&bo->va);
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#endif

	bo->tbo.bdev = &adev->mman.bdev;
	amdgpu_ttm_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
				 &bo->placement, page_align, &ctx, acc_size,
				 NULL, bp->resv, &amdgpu_ttm_bo_destroy);
	if (unlikely(r != 0))
		return r;

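	/* Report the bytes moved while creating this BO; moves into the
	 * CPU-visible part of VRAM are accounted separately when VRAM is
	 * only partially visible.
	 */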
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		amdgpu_bo_fence(bo, fence, false);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		ww_mutex_unlock(&bo->tbo.resv->lock);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size, int byte_align,
				   struct amdgpu_bo *bo)
{
	struct amdgpu_bo_param bp;
	int r;

	if (bo->shadow)
		return 0;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = byte_align;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		AMDGPU_GEM_CREATE_SHADOW;
	bp.type = ttm_bo_type_kernel;
	bp.resv = bo->tbo.resv;

	r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&bo->shadow_list, &adev->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	return r;
}

/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object; and if requested, also creates a
 * shadow object.
 * The shadow object is used to back up the original buffer object, and is
 * always placed in GTT.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr)
{
	u64 flags = bp->flags;
	int r;

	bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
	r = amdgpu_bo_do_create(adev, bp, bo_ptr);
	if (r)
		return r;

	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
		if (!bp->resv)
			WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
							NULL));

		r = amdgpu_bo_create_shadow(adev, bp->size, bp->byte_align, (*bo_ptr));

		if (!bp->resv)
			reservation_object_unlock((*bo_ptr)->tbo.resv);

		if (r)
			amdgpu_bo_unref(bo_ptr);
	}

	return r;
}
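
/*
 * Illustrative example (not part of the original code): creating a 1 MiB
 * userspace-visible BO in VRAM (with the implicit GTT fallback added by
 * amdgpu_bo_do_create() for device BOs), mirroring how
 * amdgpu_bo_create_reserved() fills in &amdgpu_bo_param above. The names
 * "bp" and "bo" are hypothetical.
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = 1 << 20;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
 *	bp.type = ttm_bo_type_device;
 *	bp.resv = NULL;
 *
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */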

/**
 * amdgpu_bo_backup_to_shadow - Backs up an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @ring: amdgpu_ring for the engine handling the buffer operations
 * @bo: &amdgpu_bo buffer to be backed up
 * @resv: reservation object with embedded fence
 * @fence: dma_fence associated with the operation
 * @direct: whether to submit the job directly
 *
 * Copies an &amdgpu_bo buffer object to its shadow object.
 * Not used for now.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct reservation_object *resv,
			       struct dma_fence **fence,
			       bool direct)
{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct, false);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

/**
 * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
 * @bo: pointer to the buffer object
 *
 * Sets the placement according to the domain, and changes the placement and
 * caching policy of the buffer object accordingly.
 * This is used for validating shadow BOs. It calls ttm_bo_validate() to
 * make sure the buffer is resident where it needs to be.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	domain = bo->preferred_domains;

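	/* Try the preferred domains first; on -ENOMEM fall back to the full
	 * set of allowed domains and try again.
	 */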
retry:
	amdgpu_ttm_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

/**
 * amdgpu_bo_restore_from_shadow - restore an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @ring: amdgpu_ring for the engine handling the buffer operations
 * @bo: &amdgpu_bo buffer to be restored
 * @resv: reservation object with embedded fence
 * @fence: dma_fence associated with the operation
 * @direct: whether to submit the job directly
 *
 * Copies a buffer object's shadow content back to the object.
 * This is used for recovering a buffer from its shadow in case of a gpu
 * reset where vram context may be lost.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo,
				  struct reservation_object *resv,
				  struct dma_fence **fence,
				  bool direct)
{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct, false);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}
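
/*
 * Illustrative example (not part of the original code): CPU access to a
 * kernel BO via amdgpu_bo_kmap()/amdgpu_bo_kunmap(). The BO is typically
 * reserved around the map and unmap, as amdgpu_bo_create_reserved() does
 * above. The names "bo" and "ptr" are hypothetical.
 *
 *	void *ptr;
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (!r)
 *		memset(ptr, 0, amdgpu_bo_size(bo));
 *	amdgpu_bo_kunmap(bo);
 *	amdgpu_bo_unreserve(bo);
 */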

/**
 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address.
 *
 * Returns:
 * the virtual address of a buffer object area.
 */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}

/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * References the contained &ttm_buffer_object.
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object.
 */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Unreferences the contained &ttm_buffer_object and clears the pointer.
 */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

/**
 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @min_offset: the start of requested address range
 * @max_offset: the end of requested address range
 * @gpu_addr: GPU offset of the &amdgpu_bo buffer object
 *
 * Pins the buffer object according to requested domain and address range. If
 * the memory is unbound GART memory, binds the pages into the GART table.
 * Adjusts pin_count and pin_size accordingly.
 *
 * Pinning means to lock pages in memory along with keeping them at a fixed
 * offset. It is required when a buffer cannot be moved, for example, when
 * a display buffer is being scanned out.
 *
 * Compared with amdgpu_bo_pin(), this function gives more flexibility on
 * where to pin a buffer if there are specific restrictions on where a buffer
 * must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->prime_shared_count) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
	 * See function amdgpu_display_supported_domains()
	 */
	domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);

	if (bo->pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

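	/* A pinned BO is expected to have a single, stable GPU address (see
	 * the contiguous-VRAM warning in amdgpu_bo_gpu_offset()), so require
	 * contiguous VRAM for it.
	 */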
	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	/* force the BO to be pinned into visible VRAM */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned fpfn, lpfn;

		fpfn = min_offset >> PAGE_SHIFT;
		lpfn = max_offset >> PAGE_SHIFT;

		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto error;
	}

	bo->pin_count = 1;
	if (gpu_addr != NULL)
		*gpu_addr = amdgpu_bo_gpu_offset(bo);

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		adev->vram_pin_size += amdgpu_bo_size(bo);
		adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		adev->gart_pin_size += amdgpu_bo_size(bo);
	}

error:
	return r;
}

/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @gpu_addr: GPU offset of the &amdgpu_bo buffer object
 *
 * A simple wrapper to amdgpu_bo_pin_restricted().
 * Provides a simpler API for buffers that do not have any strict restrictions
 * on where a buffer must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}
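
/*
 * Illustrative example (not part of the original code): pinning a BO into
 * VRAM, e.g. for scanout, and unpinning it again. The BO must be reserved
 * while pinning and unpinning; the names "bo" and "gpu_addr" are
 * hypothetical.
 *
 *	u64 gpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
 *	amdgpu_bo_unreserve(bo);
 *
 *	... scan out from gpu_addr ...
 *
 *	r = amdgpu_bo_reserve(bo, true);
 *	if (!r) {
 *		amdgpu_bo_unpin(bo);
 *		amdgpu_bo_unreserve(bo);
 *	}
 */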

/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
 * Changes placement and pin size accordingly.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (!bo->pin_count) {
		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		adev->vram_pin_size -= amdgpu_bo_size(bo);
		adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		adev->gart_pin_size -= amdgpu_bo_size(bo);
	}

	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r))
		dev_err(adev->dev, "%p validate failed for unpin\n", bo);

	return r;
}

/**
 * amdgpu_bo_evict_vram - evict VRAM buffers
 * @adev: amdgpu device object
 *
 * Evicts all VRAM buffers on the lru list of the memory type.
 * Mainly used for evicting VRAM at suspend time.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (adev->flags & AMD_IS_APU)) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
};

/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(adev->gmc.aper_base,
				   adev->gmc.aper_size);

	/* Add an MTRR for the VRAM */
	adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
					      adev->gmc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}

/**
 * amdgpu_bo_late_init - late init
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_late_init() to free resources used earlier during
 * initialization.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_late_init(struct amdgpu_device *adev)
{
	amdgpu_ttm_late_init(adev);

	return 0;
}

/**
 * amdgpu_bo_fini - tear down memory manager
 * @adev: amdgpu device object
 *
 * Reverses amdgpu_bo_init() to tear down memory manager.
 */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->gmc.vram_mtrr);
	arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
}

/**
 * amdgpu_bo_fbdev_mmap - mmap fbdev memory
 * @bo: &amdgpu_bo buffer object
 * @vma: vma as input from the fbdev mmap method
 *
 * Calls ttm_fbdev_mmap() to mmap fbdev memory if it is backed by a bo.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: new flags
 *
 * Sets the buffer object's tiling flags to the new value. Used by GEM ioctl
 * or kernel driver to set the tiling flags on a buffer.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: returned flags
 *
 * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
 * get the tiling flags on a buffer.
 */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata
 * @flags: flags of the new metadata
 *
 * Sets buffer object's metadata, its size and flags.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			    uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata
 * @buffer_size: size of the buffer
 * @metadata_size: size of the returned metadata
 * @flags: flags of the returned metadata
 *
 * Gets buffer object's metadata, its size and flags. buffer_size shall not be
 * less than metadata_size.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}
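
/*
 * Illustrative example (not part of the original code): the usual two-step
 * pattern for reading back metadata of unknown size - query the size first,
 * then fetch the data. The names "md_size", "md_flags" and "md" are
 * hypothetical.
 *
 *	uint32_t md_size;
 *	uint64_t md_flags;
 *	void *md;
 *	int r;
 *
 *	r = amdgpu_bo_get_metadata(bo, NULL, 0, &md_size, &md_flags);
 *	if (r)
 *		return r;
 *
 *	md = kmalloc(md_size, GFP_KERNEL);
 *	if (!md)
 *		return -ENOMEM;
 *	r = amdgpu_bo_get_metadata(bo, md, md_size, &md_size, &md_flags);
 */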

/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to a buffer object
 * @evict: if this move is evicting the buffer from the graphics address space
 * @new_mem: new information of the buffer object
 *
 * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
 * bookkeeping.
 * TTM driver callback which is called when ttm moves a buffer.
 */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);
	amdgpu_vm_bo_invalidate(adev, abo, evict);

	amdgpu_bo_kunmap(abo);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to a buffer object
 *
 * Notifies the driver we are taking a fault on this BO and have reserved it,
 * also performs bookkeeping.
 * TTM driver callback for dealing with vm faults.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo *abo;
	unsigned long offset, size;
	int r;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return 0;

	abo = ttm_to_amdgpu_bo(bo);

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->gmc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->pin_count > 0)
		return -EINVAL;

	/* hurrah the memory is not visible! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					 AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placement.num_busy_placement = 1;
	abo->placement.busy_placement = &abo->placements[1];

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r != 0))
		return r;

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    (offset + size) > adev->gmc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, fence);
	else
		reservation_object_add_excl_fence(resv, fence);
}

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Note: object should either be pinned or reserved when calling this
 * function; it might be useful to add a check for this for debugging.
 *
 * Returns:
 * current GPU offset of the object.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
		     !amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem));
	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
		     !bo->pin_count);
	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return bo->tbo.offset;
}

/**
 * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
 * @adev: amdgpu device object
 * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
 *
 * Returns:
 * Which of the allowed domains is preferred for pinning the BO for scanout.
 */
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain)
{
	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
			domain = AMDGPU_GEM_DOMAIN_GTT;
	}
	return domain;
}