/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */

/**
 * DOC: PRIME Buffer Sharing
 *
 * The following callback implementations are used for :ref:`sharing GEM buffer
 * objects between different devices via PRIME <prime_buffer_sharing>`.
 */

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_vm.h"
#include <drm/amdgpu_drm.h>
#include <drm/ttm/ttm_tt.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>

static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops;

/**
 * dma_buf_attach_adev - Helper to get adev of an attachment
 *
 * @attach: attachment
 *
 * Returns:
 * A struct amdgpu_device * if the attaching device is an amdgpu device or
 * partition, NULL otherwise.
 */
static struct amdgpu_device *dma_buf_attach_adev(struct dma_buf_attachment *attach)
{
	if (attach->importer_ops == &amdgpu_dma_buf_attach_ops) {
		struct drm_gem_object *obj = attach->importer_priv;
		struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

		return amdgpu_ttm_adev(bo->tbo.bdev);
	}

	return NULL;
}

/**
 * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
 *
 * @dmabuf: DMA-buf where we attach to
 * @attach: attachment to add
 *
 * Add the attachment as user to the exported DMA-buf.
 */
static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
				 struct dma_buf_attachment *attach)
{
	struct amdgpu_device *attach_adev = dma_buf_attach_adev(attach);
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

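	/* Disallow P2P access when the importer can reach this BO's VRAM
	 * neither over XGMI nor via PCIe P2P.
	 */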
	if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) &&
	    pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
		attach->peer2peer = false;

	amdgpu_vm_bo_update_shared(bo);

	return 0;
}

/**
 * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
 *
 * @attach: attachment to pin down
 *
 * Pin the BO which is backing the DMA-buf so that it can't move any more.
 */
static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dmabuf->priv);
	u32 domains = bo->allowed_domains;

	dma_resv_assert_held(dmabuf->resv);

	/* Try pinning into VRAM to allow P2P with RDMA NICs without ODP
	 * support if all attachments can do P2P. If any attachment can't do
	 * P2P just pin into GTT instead.
	 *
	 * To avoid conflicting pinnings between GPUs and RDMA when move
	 * notifiers are disabled, only allow pinning in VRAM when move
	 * notifiers are enabled.
	 */
	if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
		domains &= ~AMDGPU_GEM_DOMAIN_VRAM;
	} else {
		list_for_each_entry(attach, &dmabuf->attachments, node)
			if (!attach->peer2peer)
				domains &= ~AMDGPU_GEM_DOMAIN_VRAM;
	}

	if (domains & AMDGPU_GEM_DOMAIN_VRAM)
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (WARN_ON(!domains))
		return -EINVAL;

	return amdgpu_bo_pin(bo, domains);
}

/**
 * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
 *
 * @attach: attachment to unpin
 *
 * Unpin a previously pinned BO to make it movable again.
 */
static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	amdgpu_bo_unpin(bo);
}

/**
 * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
 * @attach: DMA-buf attachment
 * @dir: DMA direction
 *
 * Makes sure that the shared DMA buffer can be accessed by the target device.
 * For now, simply pins it to the GTT domain, where it should be accessible by
 * all DMA devices.
 *
 * Returns:
 * sg_table filled with the DMA addresses to use or ERR_PTR with negative error
 * code.
 */
static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
					   enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct sg_table *sgt;
	long r;

	if (!bo->tbo.pin_count) {
		/* move buffer into GTT or VRAM */
		struct ttm_operation_ctx ctx = { false, false };
		unsigned int domains = AMDGPU_GEM_DOMAIN_GTT;

		if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
		    attach->peer2peer) {
			bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
			domains |= AMDGPU_GEM_DOMAIN_VRAM;
		}
		amdgpu_bo_placement_from_domain(bo, domains);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (r)
			return ERR_PTR(r);
	}

	switch (bo->tbo.resource->mem_type) {
	case TTM_PL_TT:
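		/* GTT-backed BO: build an sg_table from the backing pages and
		 * DMA-map it for the importing device.
		 */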
		sgt = drm_prime_pages_to_sg(obj->dev,
					    bo->tbo.ttm->pages,
					    bo->tbo.ttm->num_pages);
		if (IS_ERR(sgt))
			return sgt;

		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC))
			goto error_free;
		break;

	case TTM_PL_VRAM:
		/* XGMI-accessible memory should never be DMA-mapped */
		if (WARN_ON(amdgpu_dmabuf_is_xgmi_accessible(
				dma_buf_attach_adev(attach), bo)))
			return ERR_PTR(-EINVAL);

		r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
					      bo->tbo.base.size, attach->dev,
					      dir, &sgt);
		if (r)
			return ERR_PTR(r);
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return sgt;

error_free:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(-EBUSY);
}

/**
 * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
 * @attach: DMA-buf attachment
 * @sgt: sg_table to unmap
 * @dir: DMA direction
 *
 * This is called when a shared DMA buffer no longer needs to be accessible by
 * another device. For now, simply unpins the buffer from GTT.
 */
static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
				 struct sg_table *sgt,
				 enum dma_data_direction dir)
{
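	/* GTT mappings carry struct pages in the sg_table; VRAM mappings
	 * created by amdgpu_vram_mgr_alloc_sgt() only carry DMA addresses.
	 */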
	if (sg_page(sgt->sgl)) {
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		amdgpu_vram_mgr_free_sgt(attach->dev, dir, sgt);
	}
}

/**
 * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: Shared DMA buffer
 * @direction: Direction of DMA transfer
 *
 * This is called before CPU access to the shared DMA buffer's memory. If it's
 * a read access, the buffer is moved to the GTT domain if possible, for optimal
 * CPU read performance.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
					   enum dma_data_direction direction)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_supported_domains(adev, bo->flags);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* move to gtt */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->tbo.pin_count &&
	    (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}

const struct dma_buf_ops amdgpu_dmabuf_ops = {
	.attach = amdgpu_dma_buf_attach,
	.pin = amdgpu_dma_buf_pin,
	.unpin = amdgpu_dma_buf_unpin,
	.map_dma_buf = amdgpu_dma_buf_map,
	.unmap_dma_buf = amdgpu_dma_buf_unmap,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @gobj: GEM BO
 * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
 *
 * The main work is done by the &drm_gem_prime_export helper.
 *
 * Returns:
 * Shared DMA buffer representing the GEM BO from the given device.
 */
struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;

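	/* BOs backed by userptr memory and per-VM always-valid BOs must not
	 * be shared with other devices.
	 */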
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(gobj, flags);
	if (!IS_ERR(buf))
		buf->ops = &amdgpu_dmabuf_ops;

	return buf;
}

/**
 * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
 *
 * @dev: DRM device
 * @dma_buf: DMA-buf
 *
 * Creates an empty SG BO for DMA-buf import.
 *
 * Returns:
 * A new GEM BO of the given DRM device, representing the memory
 * described by the given DMA-buf.
 */
static struct drm_gem_object *
amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
	struct dma_resv *resv = dma_buf->resv;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint64_t flags = 0;
	int ret;

	dma_resv_lock(resv, NULL);

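	/* When importing from another amdgpu device, inherit the exporter's
	 * caching and coherency flags so both sides treat the memory alike.
	 */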
	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		struct amdgpu_bo *other = gem_to_amdgpu_bo(dma_buf->priv);

		flags |= other->flags & (AMDGPU_GEM_CREATE_CPU_GTT_USWC |
					 AMDGPU_GEM_CREATE_COHERENT |
					 AMDGPU_GEM_CREATE_EXT_COHERENT |
					 AMDGPU_GEM_CREATE_UNCACHED);
	}

	ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_CPU, flags,
				       ttm_bo_type_sg, resv, &gobj, 0);
	if (ret)
		goto error;

	bo = gem_to_amdgpu_bo(gobj);
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;

	dma_resv_unlock(resv);
	return gobj;

error:
	dma_resv_unlock(resv);
	return ERR_PTR(ret);
}

/**
 * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
 *
 * @attach: the DMA-buf attachment
 *
 * Invalidate the DMA-buf attachment, making sure that we re-create the
 * mapping before the next use.
 */
static void
amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->importer_priv;
	struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_placement placement = {};
	struct amdgpu_vm_bo_base *bo_base;
	int r;

	/* FIXME: This should be after the "if", but needs a fix to make sure
	 * DMABuf imports are initialized in the right VM list.
	 */
	amdgpu_vm_bo_invalidate(bo, false);
	if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
		return;

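	/* Validating against an empty placement drops the current backing
	 * store, so the stale mapping is torn down and re-created on next use.
	 */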
	r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
	if (r) {
		DRM_ERROR("Failed to invalidate DMA-buf import (%d))\n", r);
		return;
	}

	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
		struct amdgpu_vm *vm = bo_base->vm;
		struct dma_resv *resv = vm->root.bo->tbo.base.resv;

		if (ticket) {
			/* When we get an error here it means that somebody
			 * else is holding the VM lock and updating page tables.
			 * So we can just continue here.
			 */
			r = dma_resv_lock(resv, ticket);
			if (r)
				continue;

		} else {
			/* TODO: This is more problematic and we actually need
			 * to allow page tables updates without holding the
			 * lock.
			 */
			if (!dma_resv_trylock(resv))
				continue;
		}

		/* Reserve fences for two SDMA page table updates */
		r = dma_resv_reserve_fences(resv, 2);
		if (!r)
			r = amdgpu_vm_clear_freed(adev, vm, NULL);
		if (!r)
			r = amdgpu_vm_handle_moved(adev, vm, ticket);

		if (r && r != -EBUSY)
			DRM_ERROR("Failed to invalidate VM page tables (%d))\n",
				  r);

		dma_resv_unlock(resv);
	}
}

static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = amdgpu_dma_buf_move_notify
};

/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: Shared DMA buffer
 *
 * Import a dma_buf into the driver and potentially create a new GEM object.
 *
 * Returns:
 * GEM BO representing the shared DMA buffer for the given device.
 */
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
	if (IS_ERR(obj))
		return obj;

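	/* Attach dynamically so the exporter may move the buffer; moves are
	 * propagated to us through amdgpu_dma_buf_move_notify().
	 */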
	attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
					&amdgpu_dma_buf_attach_ops, obj);
	if (IS_ERR(attach)) {
		drm_gem_object_put(obj);
		return ERR_CAST(attach);
	}

	get_dma_buf(dma_buf);
	obj->import_attach = attach;
	return obj;
}

/**
 * amdgpu_dmabuf_is_xgmi_accessible - Check if XGMI is available for P2P transfer
 *
 * @adev: amdgpu_device pointer of the importer
 * @bo: amdgpu buffer object
 *
 * Returns:
 * True if dmabuf accessible over xgmi, false otherwise.
 */
bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
				      struct amdgpu_bo *bo)
{
	struct drm_gem_object *obj = &bo->tbo.base;
	struct drm_gem_object *gobj;

	if (!adev)
		return false;

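	/* For imported BOs, check the exporter's BO instead; only an amdgpu
	 * exporter can share memory over XGMI.
	 */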
	if (obj->import_attach) {
		struct dma_buf *dma_buf = obj->import_attach->dmabuf;

		if (dma_buf->ops != &amdgpu_dmabuf_ops)
			/* No XGMI with non AMD GPUs */
			return false;

		gobj = dma_buf->priv;
		bo = gem_to_amdgpu_bo(gobj);
	}

	if (amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
			(bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM))
		return true;

	return false;
}