/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_exec.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/ttm/ttm_tt.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_hmm.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_vm.h"

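/*
 * amdgpu_gem_fault - fault handler for user space mappings of GEM objects
 *
 * Reserves the BO, gives the driver a chance to move it into a CPU
 * accessible placement and then lets TTM fill in the CPU page tables.
 * While the device is unplugged a dummy page is inserted instead.
 */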
static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	struct drm_device *ddev = bo->base.dev;
	vm_fault_t ret;
	int idx;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	if (drm_dev_enter(ddev, &idx)) {
		ret = amdgpu_bo_fault_reserve_notify(bo);
		if (ret) {
			drm_dev_exit(idx);
			goto unlock;
		}

		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
					       TTM_BO_VM_NUM_PREFAULT);

		drm_dev_exit(idx);
	} else {
		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
	}
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

unlock:
	dma_resv_unlock(bo->base.resv);
	return ret;
}

static const struct vm_operations_struct amdgpu_gem_vm_ops = {
	.fault = amdgpu_gem_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

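/*
 * amdgpu_gem_object_free - free callback, called once the last reference
 * to the GEM object is dropped; unregisters the HMM notifier and releases
 * the underlying TTM BO.
 */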
static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj);

	amdgpu_hmm_unregister(aobj);
	ttm_bo_put(&aobj->tbo);
}

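/*
 * amdgpu_gem_object_create - allocate an amdgpu BO and return its GEM object
 *
 * Translates the arguments into an amdgpu_bo_param, always requesting that
 * VRAM is wiped on release, and creates a user BO. On success *obj points
 * to the GEM object embedded in the new BO.
 */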
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, enum ttm_bo_type type,
			     struct dma_resv *resv,
			     struct drm_gem_object **obj, int8_t xcp_id_plus1)
{
	struct amdgpu_bo *bo;
	struct amdgpu_bo_user *ubo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	*obj = NULL;
	flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;

	bp.size = size;
	bp.byte_align = alignment;
	bp.type = type;
	bp.resv = resv;
	bp.preferred_domain = initial_domain;
	bp.flags = flags;
	bp.domain = initial_domain;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
	bp.xcp_id_plus1 = xcp_id_plus1;

	r = amdgpu_bo_create_user(adev, &bp, &ubo);
	if (r)
		return r;

	bo = &ubo->bo;
	*obj = &bo->tbo.base;

	return 0;
}

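/*
 * amdgpu_gem_force_release - forcibly release all GEM handles
 *
 * Walks all open files of the device and drops every object still present
 * in their handle tables. Any handle found here is a user space leak,
 * hence the warnings.
 */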
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and open
 * ioctl cases.
 */
static int amdgpu_gem_object_open(struct drm_gem_object *obj,
				  struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	struct mm_struct *mm;
	int r;

	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
	if (mm && mm != current->mm)
		return -EPERM;

	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    !amdgpu_vm_is_bo_always_valid(vm, abo))
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	amdgpu_vm_bo_update_shared(abo);
	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va)
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	else
		++bo_va->ref_count;
	amdgpu_bo_unreserve(abo);

	/* Validate and add eviction fence to DMABuf imports with dynamic
	 * attachment in compute VMs. Re-validation will be done by
	 * amdgpu_vm_validate. Fences are on the reservation shared with the
	 * export, which is currently required to be validated and fenced
	 * already by amdgpu_amdkfd_gpuvm_restore_process_bos.
	 *
	 * Nested locking below for the case that a GEM object is opened in
	 * kfd_mem_export_dmabuf. Since the lock below is only taken for imports,
	 * but not for export, this is a different lock class that cannot lead to
	 * circular lock dependencies.
	 */
	if (!vm->is_compute_context || !vm->process_info)
		return 0;
	if (!obj->import_attach ||
	    !dma_buf_is_dynamic(obj->import_attach->dmabuf))
		return 0;
	mutex_lock_nested(&vm->process_info->lock, 1);
	if (!WARN_ON(!vm->process_info->eviction_fence)) {
		r = amdgpu_amdkfd_bo_validate_and_fence(abo, AMDGPU_GEM_DOMAIN_GTT,
							&vm->process_info->eviction_fence->base);
		if (r) {
			struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);

			dev_warn(adev->dev, "validate_and_fence failed: %d\n", r);
			if (ti) {
				dev_warn(adev->dev, "pid %d\n", ti->pid);
				amdgpu_vm_put_task_info(ti);
			}
		}
	}
	mutex_unlock(&vm->process_info->lock);

	return r;
}

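/*
 * amdgpu_gem_object_close - drop a handle's reference in the file's VM
 *
 * Locks the BO together with the VM's page directory, drops the bo_va
 * reference and, when it was the last one, removes the mapping and fences
 * the page table update into the BO's reservation object.
 */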
static void amdgpu_gem_object_close(struct drm_gem_object *obj,
				    struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct dma_fence *fence = NULL;
	struct amdgpu_bo_va *bo_va;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (!bo_va || --bo_va->ref_count)
		goto out_unlock;

	amdgpu_vm_bo_del(adev, bo_va);
	amdgpu_vm_bo_update_shared(bo);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables on GEM object close (%ld)\n",
			r);
	if (r || !fence)
		goto out_unlock;

	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (r)
		dev_err(adev->dev, "leaking bo va (%ld)\n", r);
	drm_exec_fini(&exec);
}

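/*
 * amdgpu_gem_object_mmap - mmap callback. Userptr BOs and BOs created with
 * AMDGPU_GEM_CREATE_NO_CPU_ACCESS must never be CPU mapped; everything else
 * is handled by the generic GEM TTM helper.
 */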
static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;
	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	/* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings
	 * for debugger access to invisible VRAM. Should have used MAP_SHARED
	 * instead. Clearing VM_MAYWRITE prevents the mapping from ever
	 * becoming writable and makes is_cow_mapping(vm_flags) false.
	 */
	if (is_cow_mapping(vma->vm_flags) &&
	    !(vma->vm_flags & VM_ACCESS_FLAGS))
		vm_flags_clear(vma, VM_MAYWRITE);

	return drm_gem_ttm_mmap(obj, vma);
}

const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
	.free = amdgpu_gem_object_free,
	.open = amdgpu_gem_object_open,
	.close = amdgpu_gem_object_close,
	.export = amdgpu_gem_prime_export,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
	.mmap = amdgpu_gem_object_mmap,
	.vm_ops = &amdgpu_gem_vm_ops,
};

/*
 * GEM ioctls.
 */
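/*
 * amdgpu_gem_create_ioctl - allocate a GEM object for user space
 *
 * Validates the requested flags and domains before creating the BO. If the
 * allocation fails, the CPU access requirement is dropped first and a pure
 * VRAM placement then falls back to VRAM|GTT before giving up for good.
 */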
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct dma_resv *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle, initial_domain;
	int r;

	/* reject DOORBELLs until userspace code to use it is available */
	if (args->in.domains & AMDGPU_GEM_DOMAIN_DOORBELL)
		return -EINVAL;

	/* reject invalid gem flags */
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
		      AMDGPU_GEM_CREATE_ENCRYPTED |
		      AMDGPU_GEM_CREATE_GFX12_DCC |
		      AMDGPU_GEM_CREATE_DISCARDABLE))
		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
		return -EINVAL;

	if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
		DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
		return -EINVAL;
	}

	/* always clear VRAM */
	flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
			/* if gds bo is created from user space, it must be
			 * passed to bo list
			 */
			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
			return -EINVAL;
		}
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		r = amdgpu_bo_reserve(vm->root.bo, false);
		if (r)
			return r;

		resv = vm->root.bo->tbo.base.resv;
	}

	initial_domain = (u32)(0xffffffff & args->in.domains);
retry:
	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     initial_domain,
				     flags, ttm_bo_type_device, resv, &gobj, fpriv->xcp_id + 1);
	if (r && r != -ERESTARTSYS) {
		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
			flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
			goto retry;
		}

		if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
			initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
			goto retry;
		}
		DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
			  size, initial_domain, args->in.alignment, r);
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.bo);
		}
		amdgpu_bo_unreserve(vm->root.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}

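/*
 * amdgpu_gem_userptr_ioctl - create a GEM object backed by user memory
 *
 * Writable mappings require AMDGPU_GEM_USERPTR_REGISTER so that an MMU
 * notifier can catch the pages being moved or freed behind our back.
 */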
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_gem_userptr *args = data;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct drm_gem_object *gobj;
	struct hmm_range *range;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install a MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
	if (r)
		goto release_object;

	r = amdgpu_hmm_register(bo, args->addr);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
						 &range);
		if (r)
			goto release_object;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto user_pages_done;

		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto user_pages_done;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	if (r)
		goto user_pages_done;

	args->handle = handle;

user_pages_done:
	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);

release_object:
	drm_gem_object_put(gobj);

	return r;
}

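/*
 * amdgpu_mode_dumb_mmap - look up the fake mmap offset for a dumb buffer
 * handle. Userptr BOs and BOs without CPU access may not be mapped.
 */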
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (!gobj)
		return -ENOENT;

	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put(gobj);
	return 0;
}

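/*
 * amdgpu_gem_mmap_ioctl - thin wrapper around amdgpu_mode_dumb_mmap() which
 * returns the fake mmap offset through args->out.addr_ptr.
 */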
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

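/*
 * amdgpu_gem_wait_idle_ioctl - wait for the implicit fences of a BO to
 * signal, up to the given absolute timeout. args->out.status is nonzero
 * when the BO is still busy after the wait.
 */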
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (!gobj)
		return -ENOENT;

	robj = gem_to_amdgpu_bo(gobj);
	ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
				    true, timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put(gobj);
	return r;
}

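/*
 * amdgpu_gem_metadata_ioctl - get or set a BO's tiling flags and the opaque
 * metadata blob user space associates with the buffer.
 */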
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE) {
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			goto error;
	}

	r = amdgpu_vm_update_pdes(adev, vm, false);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

/**
 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
 *
 * @adev: amdgpu_device pointer
 * @flags: GEM UAPI flags
 *
 * Returns the GEM UAPI flags mapped into hardware for the ASIC.
 */
uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT_FLAG(adev);
	if (flags & AMDGPU_VM_PAGE_NOALLOC)
		pte_flag |= AMDGPU_PTE_NOALLOC;

	if (adev->gmc.gmc_funcs->map_mtype)
		pte_flag |= amdgpu_gmc_map_mtype(adev,
						 flags & AMDGPU_VM_MTYPE_MASK);

	return pte_flag;
}

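/*
 * amdgpu_gem_va_ioctl - manage mappings in the GPU address space
 *
 * After validating the VA range and flags this locks the BO (when one is
 * involved) together with the page directory and performs the requested
 * map, unmap, clear or replace operation. Unless a delayed update was
 * requested, the page tables are updated immediately afterwards.
 */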
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK |
		AMDGPU_VM_PAGE_NOALLOC;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct drm_exec exec;
	uint64_t va_flags;
	uint64_t vm_size;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_BOTTOM) {
		dev_dbg(dev->dev,
			"va_address 0x%llx is in reserved area 0x%llx\n",
			args->va_address, AMDGPU_VA_RESERVED_BOTTOM);
		return -EINVAL;
	}

	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
	    args->va_address < AMDGPU_GMC_HOLE_END) {
		dev_dbg(dev->dev,
			"va_address 0x%llx is in VA hole 0x%llx-0x%llx\n",
			args->va_address, AMDGPU_GMC_HOLE_START,
			AMDGPU_GMC_HOLE_END);
		return -EINVAL;
	}

	args->va_address &= AMDGPU_GMC_HOLE_MASK;

	vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
	vm_size -= AMDGPU_VA_RESERVED_TOP;
	if (args->va_address + args->map_size > vm_size) {
		dev_dbg(dev->dev,
			"va_address 0x%llx is in top reserved area 0x%llx\n",
			args->va_address + args->map_size, vm_size);
		return -EINVAL;
	}

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_dbg(dev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_dbg(dev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
		      DRM_EXEC_IGNORE_DUPLICATES, 0);
	drm_exec_until_all_locked(&exec) {
		if (gobj) {
			r = drm_exec_lock_obj(&exec, gobj);
			drm_exec_retry_on_contention(&exec);
			if (unlikely(r))
				goto error;
		}

		r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 2);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error;
	}

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;
	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
					args->operation);

error:
	drm_exec_fini(&exec);
	drm_gem_object_put(gobj);
	return r;
}

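/*
 * amdgpu_gem_op_ioctl - query BO creation info or restrict the allowed
 * placement domains of a BO. Changing the placement is refused for VRAM
 * placement of imported DMABufs, for userptr BOs and for BOs mapped by a
 * device in the same XGMI hive.
 */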
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_vm_bo_base *base;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (!gobj)
		return -ENOENT;

	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->tbo.base.size;
		info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->tbo.base.import_attach &&
		    args->value & AMDGPU_GEM_DOMAIN_VRAM) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		for (base = robj->vm_bo; base; base = base->next)
			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
						  amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
				r = -EINVAL;
				amdgpu_bo_unreserve(robj);
				goto out;
			}

		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							 AMDGPU_GEM_DOMAIN_GTT |
							 AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(robj, true);

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put(gobj);
	return r;
}

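/*
 * amdgpu_gem_align_pitch - pad a scanout width to the display alignment
 * requirement for the given bytes per pixel; returns the pitch in bytes.
 */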
static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
				  int width,
				  int cpp,
				  bool tiled)
{
	int aligned = width;
	int pitch_mask = 0;

	switch (cpp) {
	case 1:
		pitch_mask = 255;
		break;
	case 2:
		pitch_mask = 127;
		break;
	case 3:
	case 4:
		pitch_mask = 63;
		break;
	}

	aligned += pitch_mask;
	aligned &= ~pitch_mask;
	return aligned * cpp;
}

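/*
 * amdgpu_mode_dumb_create - create a BO on behalf of the generic DRM dumb
 * buffer interface; the buffer is CPU accessible, contiguous in VRAM and
 * cleared when the buffer functions ring is enabled.
 */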
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct drm_gem_object *gobj;
	uint32_t handle;
	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		    AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		    AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	u32 domain;
	int r;

	/*
	 * The buffer returned from this function should be cleared, but
	 * it can only be done if the ring is enabled or we'll fail to
	 * create the buffer.
	 */
	if (adev->mman.buffer_funcs_enabled)
		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

	args->pitch = amdgpu_gem_align_pitch(adev, args->width,
					     DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);
	domain = amdgpu_bo_get_preferred_domain(adev,
				amdgpu_display_supported_domains(adev, flags));
	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
				     ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;
		struct drm_gem_object *gobj;
		struct pid *pid;
		int id;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		pid = rcu_dereference(file->pid);
		task = pid_task(pid, PIDTYPE_TGID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, id) {
			struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);

			amdgpu_bo_print_info(id, bo, m);
		}
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_gem_info);

#endif

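/*
 * amdgpu_debugfs_gem_init - register the amdgpu_gem_info debugfs file, which
 * lists the GEM objects of every open drm_file together with their placement.
 */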
void amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_gem_info", 0444, root, adev,
			    &amdgpu_debugfs_gem_info_fops);
#endif
}