/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <[email protected]>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>

#include <drm/drm_drv.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_vram_mgr.h"
#include "amdgpu_vm.h"
#include "amdgpu_dma_buf.h"

/**
 * DOC: amdgpu_object
 *
 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
 * represents memory used by the driver (VRAM, system memory, etc.). The driver
 * provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these interfaces
 * to create/destroy/set buffer objects, which are then managed by the kernel
 * TTM memory manager.
 * The interfaces are also used internally by kernel clients, including gfx,
 * uvd, etc. for kernel-managed allocations used by the GPU.
 *
 */

static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	amdgpu_bo_kunmap(bo);

	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);
	amdgpu_bo_unref(&bo->parent);
	kvfree(bo);
}

static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
	struct amdgpu_bo_user *ubo;

	ubo = to_amdgpu_bo_user(bo);
	kfree(ubo->metadata);
	amdgpu_bo_destroy(tbo);
}

/**
 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses the destroy function associated with the object to determine if this
 * is an &amdgpu_bo.
 *
 * Returns:
 * true if the object belongs to &amdgpu_bo, false if not.
 */
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_bo_destroy ||
	    bo->destroy == &amdgpu_bo_user_destroy)
		return true;

	return false;
}

/**
 * amdgpu_bo_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to requested domain and the buffer's
 * flags.
 */
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned int visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
		int8_t mem_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);

		if (adev->gmc.mem_partitions && mem_id >= 0) {
			places[c].fpfn = adev->gmc.mem_partitions[mem_id].range.fpfn;
			/*
			 * memory partition range lpfn is inclusive start + size - 1
			 * TTM place lpfn is exclusive start + size
			 */
			places[c].lpfn = adev->gmc.mem_partitions[mem_id].range.lpfn + 1;
		} else {
			places[c].fpfn = 0;
			places[c].lpfn = 0;
		}
		places[c].mem_type = TTM_PL_VRAM;
		places[c].flags = 0;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = min_not_zero(places[c].lpfn, visible_pfn);
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (abo->tbo.type == ttm_bo_type_kernel &&
		    flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;

		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_DOORBELL) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_DOORBELL;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type =
			abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
			AMDGPU_PL_PREEMPT : TTM_PL_TT;
		places[c].flags = 0;
		/*
		 * When GTT is just an alternative to VRAM make sure that we
		 * only use it as fallback and still try to fill up VRAM first.
		 */
		if (abo->tbo.resource && !(adev->flags & AMD_IS_APU) &&
		    domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
			places[c].flags |= TTM_PL_FLAG_FALLBACK;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_SYSTEM;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_GDS;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_GWS;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_OA;
		places[c].flags = 0;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_SYSTEM;
		places[c].flags = 0;
		c++;
	}

	BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);

	placement->num_placement = c;
	placement->placement = places;
}

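/*
 * Illustrative usage sketch (not part of this file): callers typically
 * refresh the placement and then let TTM migrate the BO, e.g. when forcing
 * an already reserved BO into GTT. The context values here are hypothetical:
 *
 *	struct ttm_operation_ctx ctx = { true, false };
 *
 *	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
 *	r = ttm_bo_validate(&abo->tbo, &abo->placement, &ctx);
 */
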
/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: For bo_ptr, a new BO is only created if it points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	if (!size) {
		amdgpu_bo_unref(bo_ptr);
		return 0;
	}

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
		: AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
		goto error_unpin;
	}

	if (gpu_addr)
		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unpin;
		}
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}

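/*
 * Illustrative usage sketch (not part of this file): a caller that needs
 * the BO still reserved, e.g. to initialize its contents before dropping
 * the reservation. All names are hypothetical:
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *ptr;
 *
 *	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
 *				      AMDGPU_GEM_DOMAIN_VRAM,
 *				      &bo, &gpu_addr, &ptr);
 *	if (!r) {
 *		memset(ptr, 0, size);
 *		amdgpu_bo_unreserve(bo);
 *	}
 */
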
/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * This function is exported to allow the V4L2 isp device
 * external to the drm device to create and access the kernel BO.
 *
 * Note: For bo_ptr, a new BO is only created if it points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);

	if (r)
		return r;

	if (*bo_ptr)
		amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}
EXPORT_SYMBOL(amdgpu_bo_create_kernel);

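/*
 * Illustrative usage sketch (not part of this file): the usual lifecycle
 * for a kernel-internal allocation pairs this with amdgpu_bo_free_kernel().
 * All names are hypothetical:
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT,
 *				    &bo, &gpu_addr, &cpu_ptr);
 *	...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */
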
/**
 * amdgpu_bo_create_isp_user - create user BO for isp
 *
 * @adev: amdgpu device object
 * @dma_buf: DMABUF handle for isp buffer
 * @domain: where to place it
 * @bo: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 *
 * Imports the isp DMABUF to allocate and pin a user BO for isp internal use.
 * It performs a GART allocation to generate a gpu_addr for the BO, making it
 * accessible through the GART aperture for the ISP HW.
 *
 * This function is exported to allow the V4L2 isp device external to the drm
 * device to create and access the isp user BO.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_isp_user(struct amdgpu_device *adev,
			      struct dma_buf *dma_buf, u32 domain, struct amdgpu_bo **bo,
			      u64 *gpu_addr)

{
	struct drm_gem_object *gem_obj;
	int r;

	gem_obj = amdgpu_gem_prime_import(&adev->ddev, dma_buf);
	*bo = gem_to_amdgpu_bo(gem_obj);
	if (!(*bo)) {
		dev_err(adev->dev, "failed to get valid isp user bo\n");
		return -EINVAL;
	}

	r = amdgpu_bo_reserve(*bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve isp user bo\n", r);
		return r;
	}

	r = amdgpu_bo_pin(*bo, domain);
	if (r) {
		dev_err(adev->dev, "(%d) isp user bo pin failed\n", r);
		goto error_unreserve;
	}

	r = amdgpu_ttm_alloc_gart(&(*bo)->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", *bo);
		goto error_unpin;
	}

	if (!WARN_ON(!gpu_addr))
		*gpu_addr = amdgpu_bo_gpu_offset(*bo);

	amdgpu_bo_unreserve(*bo);

	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo);
error_unreserve:
	amdgpu_bo_unreserve(*bo);
	amdgpu_bo_unref(bo);

	return r;
}
EXPORT_SYMBOL(amdgpu_bo_create_isp_user);

/**
 * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
 *
 * @adev: amdgpu device object
 * @offset: offset of the BO
 * @size: size of the BO
 * @bo_ptr: used to initialize BOs in structures
 * @cpu_addr: optional CPU address mapping
 *
 * Creates a kernel BO at a specific offset in VRAM.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	unsigned int i;
	int r;

	offset &= PAGE_MASK;
	size = ALIGN(size, PAGE_SIZE);

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM, bo_ptr, NULL,
				      cpu_addr);
	if (r)
		return r;

	if ((*bo_ptr) == NULL)
		return 0;

	/*
	 * Remove the original mem node and create a new one at the requested
	 * position.
	 */
	if (cpu_addr)
		amdgpu_bo_kunmap(*bo_ptr);

	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);

	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
	}
	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
			     &(*bo_ptr)->tbo.resource, &ctx);
	if (r)
		goto error;

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r)
			goto error;
	}

	amdgpu_bo_unreserve(*bo_ptr);
	return 0;

error:
	amdgpu_bo_unreserve(*bo_ptr);
	amdgpu_bo_unref(bo_ptr);
	return r;
}

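/*
 * Illustrative usage sketch (not part of this file): this helper is the
 * typical way to claim a firmware-reserved VRAM range. The offset and the
 * size variable below are hypothetical:
 *
 *	struct amdgpu_bo *stolen = NULL;
 *
 *	r = amdgpu_bo_create_kernel_at(adev, 0, stolen_size,
 *				       &stolen, NULL);
 */
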
/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps and unpins a BO for kernel internal use.
 *
 * This function is exported to allow the V4L2 isp device
 * external to the drm device to free the kernel BO.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	WARN_ON(amdgpu_ttm_adev((*bo)->tbo.bdev)->in_suspend);

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}
EXPORT_SYMBOL(amdgpu_bo_free_kernel);

/**
 * amdgpu_bo_free_isp_user - free BO for isp use
 *
 * @bo: amdgpu isp user BO to free
 *
 * Unpins and unrefs a BO for isp internal use.
 *
 * This function is exported to allow the V4L2 isp device
 * external to the drm device to free the isp user BO.
 */
void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return;

	if (amdgpu_bo_reserve(bo, true) == 0) {
		amdgpu_bo_unpin(bo);
		amdgpu_bo_unreserve(bo);
	}
	amdgpu_bo_unref(&bo);
}
EXPORT_SYMBOL(amdgpu_bo_free_isp_user);

/* Validate that the BO size fits within the requested domain's total memory */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
				    unsigned long size, u32 domain)
{
	struct ttm_resource_manager *man = NULL;

	/*
	 * If GTT is part of requested domains the check must succeed to
	 * allow fall back to GTT.
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT)
		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
	else if (domain & AMDGPU_GEM_DOMAIN_VRAM)
		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
	else
		return true;

	if (!man) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			WARN_ON_ONCE("GTT domain requested but GTT mem manager uninitialized");
		return false;
	}

	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU, _DOMAIN_DOORBELL */
	if (size < man->size)
		return true;

	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size, man->size);
	return false;
}

bool amdgpu_bo_support_uswc(u64 bo_flags)
{

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	return false;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	return false;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		return false;

	return true;
#endif
}

/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = bp->no_wait_gpu,
		/* We opt to avoid OOM on system page allocations */
		.gfp_retry_mayfail = true,
		.allow_res_evict = bp->type != ttm_bo_type_kernel,
		.resv = bp->resv
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	int r;

	/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		/* GWS and OA don't need any alignment. */
		page_align = bp->byte_align;
		size <<= PAGE_SHIFT;

	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
		/* Both size and alignment must be a multiple of 4. */
		page_align = ALIGN(bp->byte_align, 4);
		size = ALIGN(size, 4) << PAGE_SHIFT;
	} else {
		/* Memory should be aligned at least to a page size. */
		page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
		size = ALIGN(size, PAGE_SIZE);
	}

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));

	*bo_ptr = NULL;
	bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
	bo->tbo.base.funcs = &amdgpu_gem_object_funcs;
	bo->vm_bo = NULL;
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    !(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE) &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

	if (adev->gmc.mem_partitions)
		/* For GPUs with spatial partitioning, bo->xcp_id=-1 means any partition */
		bo->xcp_id = bp->xcp_id_plus1 - 1;
	else
		/* For GPUs without spatial partitioning */
		bo->xcp_id = 0;

	if (!amdgpu_bo_support_uswc(bo->flags))
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	bo->tbo.bdev = &adev->mman.bdev;
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
			  AMDGPU_GEM_DOMAIN_GDS))
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	else
		amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 2;
	else if (!(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE))
		bo->tbo.priority = 1;

	if (!bp->destroy)
		bp->destroy = &amdgpu_bo_destroy;

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, bp->type,
				 &bo->placement, page_align, &ctx, NULL,
				 bp->resv, bp->destroy);
	if (unlikely(r != 0))
		return r;

	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_res_cpu_visible(adev, bo->tbo.resource))
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.resource->mem_type == TTM_PL_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_ttm_clear_buffer(bo, bo->tbo.base.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		dma_resv_add_fence(bo->tbo.base.resv, fence,
				   DMA_RESV_USAGE_KERNEL);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		dma_resv_unlock(bo->tbo.base.resv);
	amdgpu_bo_unref(&bo);
	return r;
}

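/*
 * Illustrative usage sketch (not part of this file): a minimal
 * amdgpu_bo_param setup for a direct amdgpu_bo_create() call, mirroring
 * what amdgpu_bo_create_reserved() does above. All values are hypothetical:
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = PAGE_SIZE;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;
 *	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 *
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */
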
/**
 * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @ubo_ptr: pointer to the buffer object pointer
 *
 * Create a BO to be used by a user application.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */

int amdgpu_bo_create_user(struct amdgpu_device *adev,
			  struct amdgpu_bo_param *bp,
			  struct amdgpu_bo_user **ubo_ptr)
{
	struct amdgpu_bo *bo_ptr;
	int r;

	bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
	bp->destroy = &amdgpu_bo_user_destroy;
	r = amdgpu_bo_create(adev, bp, &bo_ptr);
	if (r)
		return r;

	*ubo_ptr = to_amdgpu_bo_user(bo_ptr);
	return r;
}

/**
 * amdgpu_bo_create_vm - create an &amdgpu_bo_vm buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @vmbo_ptr: pointer to the buffer object pointer
 *
 * Create a BO to be used by GPUVM.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */

int amdgpu_bo_create_vm(struct amdgpu_device *adev,
			struct amdgpu_bo_param *bp,
			struct amdgpu_bo_vm **vmbo_ptr)
{
	struct amdgpu_bo *bo_ptr;
	int r;

	/* bo_ptr_size will be determined by the caller and it depends on
	 * num of amdgpu_vm_pt entries.
	 */
	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
	r = amdgpu_bo_create(adev, bp, &bo_ptr);
	if (r)
		return r;

	*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
	return r;
}

/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
				  false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}

/**
 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address
 *
 * Returns:
 * the virtual address of a buffer object area.
 */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}

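/*
 * Illustrative usage sketch (not part of this file): CPU access through
 * the kmap helpers, assuming the BO was created CPU-accessible and is
 * reserved by the caller. All names are hypothetical:
 *
 *	void *ptr;
 *
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (!r) {
 *		memcpy(ptr, data, size);
 *		amdgpu_bo_kunmap(bo);
 *	}
 */
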
/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * References the contained &ttm_buffer_object.
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object.
 */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	drm_gem_object_get(&bo->tbo.base);
	return bo;
}

/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Unreferences the contained &ttm_buffer_object and clears the pointer.
 */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	if ((*bo) == NULL)
		return;

	drm_gem_object_put(&(*bo)->tbo.base);
	*bo = NULL;
}

/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 *
 * Pins the buffer object according to requested domain. If the memory is
 * unbound gart memory, binds the pages into gart table. Adjusts pin_count and
 * pin_size accordingly.
 *
 * Pinning means to lock pages in memory along with keeping them at a fixed
 * offset. It is required when a buffer cannot be moved, for example, when
 * a display buffer is being scanned out.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	/* Check domain to be pinned to against preferred domains */
	if (bo->preferred_domains & domain)
		domain = bo->preferred_domains & domain;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->tbo.base.import_attach) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	if (bo->tbo.pin_count) {
		uint32_t mem_type = bo->tbo.resource->mem_type;
		uint32_t mem_flags = bo->tbo.resource->placement;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		if ((mem_type == TTM_PL_VRAM) &&
		    (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
		    !(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
			return -EINVAL;

		ttm_bo_pin(&bo->tbo);
		return 0;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
	 * See function amdgpu_display_supported_domains()
	 */
	domain = amdgpu_bo_get_preferred_domain(adev, domain);

	if (bo->tbo.base.import_attach)
		dma_buf_pin(bo->tbo.base.import_attach);

	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS &&
		    bo->placements[i].mem_type == TTM_PL_VRAM)
			bo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	ttm_bo_pin(&bo->tbo);

	if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}

error:
	return r;
}

/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
 * Changes placement and pin size accordingly.
 */
void amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	ttm_bo_unpin(&bo->tbo);
	if (bo->tbo.pin_count)
		return;

	if (bo->tbo.base.import_attach)
		dma_buf_unpin(bo->tbo.base.import_attach);

	if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}
}

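/*
 * Illustrative usage sketch (not part of this file): pinning a buffer,
 * e.g. for scanout, and unpinning it again, both under the BO reservation.
 * All names are hypothetical:
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (!r) {
 *		r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *		amdgpu_bo_unreserve(bo);
 *	}
 *	...
 *	if (!amdgpu_bo_reserve(bo, true)) {
 *		amdgpu_bo_unpin(bo);
 *		amdgpu_bo_unreserve(bo);
 *	}
 */
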
static const char * const amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
	"GDDR6",
	"DDR5",
	"LPDDR4",
	"LPDDR5"
};

/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_init() to initialize the amdgpu memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* On A+A platforms, VRAM can be mapped as WB */
	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
		/* reserve PAT memory space to WC for VRAM */
		int r = arch_io_reserve_memtype_wc(adev->gmc.aper_base,
						   adev->gmc.aper_size);

		if (r) {
			DRM_ERROR("Unable to set WC memtype for the aperture base\n");
			return r;
		}

		/* Add an MTRR for the VRAM */
		adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
						       adev->gmc.aper_size);
	}

	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}

10856f4e8d6eSSamuel Li /**
10866f4e8d6eSSamuel Li * amdgpu_bo_fini - tear down memory manager
10876f4e8d6eSSamuel Li * @adev: amdgpu device object
10886f4e8d6eSSamuel Li *
10896f4e8d6eSSamuel Li * Reverses amdgpu_bo_init() to tear down memory manager.
10906f4e8d6eSSamuel Li */
amdgpu_bo_fini(struct amdgpu_device * adev)1091d38ceaf9SAlex Deucher void amdgpu_bo_fini(struct amdgpu_device *adev)
1092d38ceaf9SAlex Deucher {
109362d5f9f7SLeslie Shi int idx;
109462d5f9f7SLeslie Shi
1095d38ceaf9SAlex Deucher amdgpu_ttm_fini(adev);
109662d5f9f7SLeslie Shi
109762d5f9f7SLeslie Shi if (drm_dev_enter(adev_to_drm(adev), &idx)) {
1098a0ba1279SLijo Lazar if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
109962d5f9f7SLeslie Shi arch_phys_wc_del(adev->gmc.vram_mtrr);
110062d5f9f7SLeslie Shi arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
110162d5f9f7SLeslie Shi }
110262d5f9f7SLeslie Shi drm_dev_exit(idx);
110362d5f9f7SLeslie Shi }
1104d38ceaf9SAlex Deucher }
1105d38ceaf9SAlex Deucher
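/*
 * Usage sketch (illustrative only, not lifted from in-tree code): device
 * bring-up is expected to pair the two calls above, with amdgpu_bo_init()
 * early in the memory-manager setup and amdgpu_bo_fini() on teardown:
 *
 *	r = amdgpu_bo_init(adev);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_bo_fini(adev);
 */
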
11066f4e8d6eSSamuel Li /**
11076f4e8d6eSSamuel Li * amdgpu_bo_set_tiling_flags - set tiling flags
11086f4e8d6eSSamuel Li * @bo: &amdgpu_bo buffer object
11096f4e8d6eSSamuel Li * @tiling_flags: new flags
11106f4e8d6eSSamuel Li *
11116f4e8d6eSSamuel Li * Replaces the buffer object's tiling flags with the new ones. Used by the
11126f4e8d6eSSamuel Li * GEM ioctl or kernel driver to set the tiling flags on a buffer.
11136f4e8d6eSSamuel Li *
11142472e11bSMichel Dänzer * Returns:
11152472e11bSMichel Dänzer * 0 for success or a negative error code on failure.
11166f4e8d6eSSamuel Li */
1117d38ceaf9SAlex Deucher int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
1118d38ceaf9SAlex Deucher {
11199079ac76SMarek Olšák struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1120cc1bcf85SNirmoy Das struct amdgpu_bo_user *ubo;
11219079ac76SMarek Olšák
1122030bb4adSNirmoy Das BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
11239079ac76SMarek Olšák if (adev->family <= AMDGPU_FAMILY_CZ &&
11249079ac76SMarek Olšák AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
1125d38ceaf9SAlex Deucher return -EINVAL;
1126d38ceaf9SAlex Deucher
1127cc1bcf85SNirmoy Das ubo = to_amdgpu_bo_user(bo);
1128cc1bcf85SNirmoy Das ubo->tiling_flags = tiling_flags;
1129d38ceaf9SAlex Deucher return 0;
1130d38ceaf9SAlex Deucher }
1131d38ceaf9SAlex Deucher
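/*
 * Usage sketch (illustrative; assumes the amdgpu_bo_reserve()/unreserve()
 * helpers from amdgpu_object.h and the AMDGPU_TILING_SET() macro from
 * amdgpu_drm.h). The BO must be a user BO, and a GEM-ioctl style caller
 * reserves it across the update:
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (unlikely(r))
 *		return r;
 *	r = amdgpu_bo_set_tiling_flags(bo,
 *			AMDGPU_TILING_SET(SWIZZLE_MODE, swizzle));
 *	amdgpu_bo_unreserve(bo);
 */
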
11326f4e8d6eSSamuel Li /**
11336f4e8d6eSSamuel Li * amdgpu_bo_get_tiling_flags - get tiling flags
11346f4e8d6eSSamuel Li * @bo: &amdgpu_bo buffer object
11356f4e8d6eSSamuel Li * @tiling_flags: returned flags
11366f4e8d6eSSamuel Li *
11376f4e8d6eSSamuel Li * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
11386f4e8d6eSSamuel Li * query the tiling flags of a buffer.
11396f4e8d6eSSamuel Li */
1140d38ceaf9SAlex Deucher void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
1141d38ceaf9SAlex Deucher {
1142cc1bcf85SNirmoy Das struct amdgpu_bo_user *ubo;
1143cc1bcf85SNirmoy Das
1144030bb4adSNirmoy Das BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
114552791eeeSChristian König dma_resv_assert_held(bo->tbo.base.resv);
1146cc1bcf85SNirmoy Das ubo = to_amdgpu_bo_user(bo);
1147d38ceaf9SAlex Deucher
1148d38ceaf9SAlex Deucher if (tiling_flags)
1149cc1bcf85SNirmoy Das *tiling_flags = ubo->tiling_flags;
1150d38ceaf9SAlex Deucher }
1151d38ceaf9SAlex Deucher
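/*
 * Read-back sketch (illustrative): the dma_resv_assert_held() above means
 * the caller must hold the BO's reservation, so a typical query looks like:
 *
 *	u64 tiling_flags = 0;
 *
 *	if (!amdgpu_bo_reserve(bo, false)) {
 *		amdgpu_bo_get_tiling_flags(bo, &tiling_flags);
 *		amdgpu_bo_unreserve(bo);
 *	}
 */
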
11526f4e8d6eSSamuel Li /**
11536f4e8d6eSSamuel Li * amdgpu_bo_set_metadata - set metadata
11546f4e8d6eSSamuel Li * @bo: &amdgpu_bo buffer object
11556f4e8d6eSSamuel Li * @metadata: new metadata
11566f4e8d6eSSamuel Li * @metadata_size: size of the new metadata
11576f4e8d6eSSamuel Li * @flags: flags of the new metadata
11586f4e8d6eSSamuel Li *
11596f4e8d6eSSamuel Li * Sets buffer object's metadata, its size and flags.
11606f4e8d6eSSamuel Li * Used via GEM ioctl.
11616f4e8d6eSSamuel Li *
11622472e11bSMichel Dänzer * Returns:
11632472e11bSMichel Dänzer * 0 for success or a negative error code on failure.
11646f4e8d6eSSamuel Li */
1165d38ceaf9SAlex Deucher int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
11661d6ecab1SSrinivasan Shanmugam u32 metadata_size, uint64_t flags)
1167d38ceaf9SAlex Deucher {
1168cc1bcf85SNirmoy Das struct amdgpu_bo_user *ubo;
1169d38ceaf9SAlex Deucher void *buffer;
1170d38ceaf9SAlex Deucher
1171030bb4adSNirmoy Das BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
1172cc1bcf85SNirmoy Das ubo = to_amdgpu_bo_user(bo);
1173d38ceaf9SAlex Deucher if (!metadata_size) {
1174cc1bcf85SNirmoy Das if (ubo->metadata_size) {
1175cc1bcf85SNirmoy Das kfree(ubo->metadata);
1176cc1bcf85SNirmoy Das ubo->metadata = NULL;
1177cc1bcf85SNirmoy Das ubo->metadata_size = 0;
1178d38ceaf9SAlex Deucher }
1179d38ceaf9SAlex Deucher return 0;
1180d38ceaf9SAlex Deucher }
1181d38ceaf9SAlex Deucher
1182d38ceaf9SAlex Deucher if (metadata == NULL)
1183d38ceaf9SAlex Deucher return -EINVAL;
1184d38ceaf9SAlex Deucher
118571affda5SAndrzej Hajda buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
1186d38ceaf9SAlex Deucher if (buffer == NULL)
1187d38ceaf9SAlex Deucher return -ENOMEM;
1188d38ceaf9SAlex Deucher
1189cc1bcf85SNirmoy Das kfree(ubo->metadata);
1190cc1bcf85SNirmoy Das ubo->metadata_flags = flags;
1191cc1bcf85SNirmoy Das ubo->metadata = buffer;
1192cc1bcf85SNirmoy Das ubo->metadata_size = metadata_size;
1193d38ceaf9SAlex Deucher
1194d38ceaf9SAlex Deucher return 0;
1195d38ceaf9SAlex Deucher }
1196d38ceaf9SAlex Deucher
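/*
 * Sketch (illustrative): the metadata is an opaque blob that the function
 * copies with kmemdup(), and passing metadata_size == 0 frees whatever was
 * attached before, so a caller can update and later clear it:
 *
 *	r = amdgpu_bo_set_metadata(bo, blob, blob_size, blob_flags);
 *	...
 *	r = amdgpu_bo_set_metadata(bo, NULL, 0, 0);
 */
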
11976f4e8d6eSSamuel Li /**
11986f4e8d6eSSamuel Li * amdgpu_bo_get_metadata - get metadata
11996f4e8d6eSSamuel Li * @bo: &amdgpu_bo buffer object
12006f4e8d6eSSamuel Li * @buffer: returned metadata
12016f4e8d6eSSamuel Li * @buffer_size: size of the buffer
12026f4e8d6eSSamuel Li * @metadata_size: size of the returned metadata
12036f4e8d6eSSamuel Li * @flags: flags of the returned metadata
12046f4e8d6eSSamuel Li *
12056f4e8d6eSSamuel Li * Gets buffer object's metadata, its size and flags. buffer_size shall not be
12066f4e8d6eSSamuel Li * less than metadata_size.
12076f4e8d6eSSamuel Li * Used via GEM ioctl.
12086f4e8d6eSSamuel Li *
12092472e11bSMichel Dänzer * Returns:
12102472e11bSMichel Dänzer * 0 for success or a negative error code on failure.
12116f4e8d6eSSamuel Li */
1212d38ceaf9SAlex Deucher int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
1213d38ceaf9SAlex Deucher size_t buffer_size, uint32_t *metadata_size,
1214d38ceaf9SAlex Deucher uint64_t *flags)
1215d38ceaf9SAlex Deucher {
1216cc1bcf85SNirmoy Das struct amdgpu_bo_user *ubo;
1217cc1bcf85SNirmoy Das
1218d38ceaf9SAlex Deucher if (!buffer && !metadata_size)
1219d38ceaf9SAlex Deucher return -EINVAL;
1220d38ceaf9SAlex Deucher
1221030bb4adSNirmoy Das BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
1222cc1bcf85SNirmoy Das ubo = to_amdgpu_bo_user(bo);
1223eba98523SShiwu Zhang if (metadata_size)
1224eba98523SShiwu Zhang *metadata_size = ubo->metadata_size;
1225eba98523SShiwu Zhang
1226d38ceaf9SAlex Deucher if (buffer) {
1227cc1bcf85SNirmoy Das if (buffer_size < ubo->metadata_size)
1228d38ceaf9SAlex Deucher return -EINVAL;
1229d38ceaf9SAlex Deucher
1230cc1bcf85SNirmoy Das if (ubo->metadata_size)
1231cc1bcf85SNirmoy Das memcpy(buffer, ubo->metadata, ubo->metadata_size);
1232d38ceaf9SAlex Deucher }
1233d38ceaf9SAlex Deucher
1234d38ceaf9SAlex Deucher if (flags)
1235cc1bcf85SNirmoy Das *flags = ubo->metadata_flags;
1236d38ceaf9SAlex Deucher
1237d38ceaf9SAlex Deucher return 0;
1238d38ceaf9SAlex Deucher }
1239d38ceaf9SAlex Deucher
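/*
 * Two-pass query sketch (illustrative): call once with a NULL buffer to
 * learn the size, then again with storage of at least that size:
 *
 *	uint32_t size = 0;
 *	uint64_t flags;
 *	void *data;
 *
 *	r = amdgpu_bo_get_metadata(bo, NULL, 0, &size, NULL);
 *	if (r || !size)
 *		return r;
 *	data = kmalloc(size, GFP_KERNEL);
 *	if (!data)
 *		return -ENOMEM;
 *	r = amdgpu_bo_get_metadata(bo, data, size, &size, &flags);
 */
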
12406f4e8d6eSSamuel Li /**
12416f4e8d6eSSamuel Li * amdgpu_bo_move_notify - notification about a memory move
12426f4e8d6eSSamuel Li * @bo: pointer to a buffer object
12436f4e8d6eSSamuel Li * @evict: if this move is evicting the buffer from the graphics address space
1244d3a9331aSChristian König * @new_mem: new resource for backing the BO
12456f4e8d6eSSamuel Li *
12466f4e8d6eSSamuel Li * Marks the corresponding &amdgpu_bo buffer object as invalid and performs
12476f4e8d6eSSamuel Li * the related bookkeeping.
12486f4e8d6eSSamuel Li * TTM driver callback which is called when ttm moves a buffer.
12496f4e8d6eSSamuel Li */
1250d3a9331aSChristian König void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
1251d3a9331aSChristian König bool evict,
1252d3a9331aSChristian König struct ttm_resource *new_mem)
1253d38ceaf9SAlex Deucher {
1254d3a9331aSChristian König struct ttm_resource *old_mem = bo->resource;
1255765e7fbfSChristian König struct amdgpu_bo *abo;
1256d38ceaf9SAlex Deucher
1257c704ab18SChristian König if (!amdgpu_bo_is_amdgpu_bo(bo))
1258d38ceaf9SAlex Deucher return;
1259d38ceaf9SAlex Deucher
1260b82485fdSAndres Rodriguez abo = ttm_to_amdgpu_bo(bo);
126174ef9527SYunxiang Li amdgpu_vm_bo_move(abo, new_mem, evict);
1262d38ceaf9SAlex Deucher
12636375bbb4SChristian König amdgpu_bo_kunmap(abo);
12646375bbb4SChristian König
12652d4dad27SChristian König if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
1266d3a9331aSChristian König old_mem && old_mem->mem_type != TTM_PL_SYSTEM)
12672d4dad27SChristian König dma_buf_move_notify(abo->tbo.base.dma_buf);
12682d4dad27SChristian König
1269d3a9331aSChristian König /* move_notify is called before move happens */
1270d3a9331aSChristian König trace_amdgpu_bo_move(abo, new_mem ? new_mem->mem_type : -1,
1271d3a9331aSChristian König old_mem ? old_mem->mem_type : -1);
1272d38ceaf9SAlex Deucher }
1273d38ceaf9SAlex Deucher
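/*
 * Note (added for clarity): BO users do not call this directly; TTM reaches
 * it through the driver's move path (see amdgpu_ttm.c) before the backing
 * store actually changes, which is why old_mem above still describes the
 * pre-move placement.
 */
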
12746f4e8d6eSSamuel Li /**
1275736b1729SKevin Wang * amdgpu_bo_release_notify - notification about a BO being released
1276ab2f7a5cSFelix Kuehling * @bo: pointer to a buffer object
1277ab2f7a5cSFelix Kuehling *
1278ab2f7a5cSFelix Kuehling * Wipes VRAM buffers whose contents should not be leaked before the
1279ab2f7a5cSFelix Kuehling * memory is released.
1280ab2f7a5cSFelix Kuehling */
1281ab2f7a5cSFelix Kuehling void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
1282ab2f7a5cSFelix Kuehling {
128332f90e65SGuchun Chen struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1284ab2f7a5cSFelix Kuehling struct dma_fence *fence = NULL;
1285ab2f7a5cSFelix Kuehling struct amdgpu_bo *abo;
1286ab2f7a5cSFelix Kuehling int r;
1287ab2f7a5cSFelix Kuehling
1288ab2f7a5cSFelix Kuehling if (!amdgpu_bo_is_amdgpu_bo(bo))
1289ab2f7a5cSFelix Kuehling return;
1290ab2f7a5cSFelix Kuehling
1291ab2f7a5cSFelix Kuehling abo = ttm_to_amdgpu_bo(bo);
1292ab2f7a5cSFelix Kuehling
129365d2765dSChristian König WARN_ON(abo->vm_bo);
129465d2765dSChristian König
1295ab2f7a5cSFelix Kuehling if (abo->kfd_bo)
12965702d052SFelix Kuehling amdgpu_amdkfd_release_notify(abo);
1297ab2f7a5cSFelix Kuehling
1298cb0de06dSChristian König /*
1299cb0de06dSChristian König * We lock the private dma_resv object here; since the BO is about to be
1300cb0de06dSChristian König * released, nobody else should still hold a pointer to it.
1301cb0de06dSChristian König * So if the locking here fails, something is wrong with the reference
1302cb0de06dSChristian König * counting.
1303cb0de06dSChristian König */
1304cb0de06dSChristian König if (WARN_ON_ONCE(!dma_resv_trylock(&bo->base._resv)))
1305cb0de06dSChristian König return;
1306cb0de06dSChristian König
1307cb0de06dSChristian König amdgpu_amdkfd_remove_all_eviction_fences(abo);
1308f4a3c42bSxinhui pan
130963af82cfSChristian König if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
131032f90e65SGuchun Chen !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
131193bb18d2Slyndonli adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
1312cb0de06dSChristian König goto out;
1313ab2f7a5cSFelix Kuehling
1314cb0de06dSChristian König r = dma_resv_reserve_fences(&bo->base._resv, 1);
1315cb0de06dSChristian König if (r)
1316cb0de06dSChristian König goto out;
1317ab2f7a5cSFelix Kuehling
1318cb0de06dSChristian König r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true);
1319cb0de06dSChristian König if (WARN_ON(r))
1320cb0de06dSChristian König goto out;
1321cb0de06dSChristian König
1322a68c7eaaSArunpravin Paneer Selvam amdgpu_vram_mgr_set_cleared(bo->resource);
1323cb0de06dSChristian König dma_resv_add_fence(&bo->base._resv, fence, DMA_RESV_USAGE_KERNEL);
1324ab2f7a5cSFelix Kuehling dma_fence_put(fence);
1325ab2f7a5cSFelix Kuehling
1326cb0de06dSChristian König out:
1327cb0de06dSChristian König dma_resv_unlock(&bo->base._resv);
1328ab2f7a5cSFelix Kuehling }
1329ab2f7a5cSFelix Kuehling
1330ab2f7a5cSFelix Kuehling /**
13316f4e8d6eSSamuel Li * amdgpu_bo_fault_reserve_notify - notification about a memory fault
13326f4e8d6eSSamuel Li * @bo: pointer to a buffer object
13336f4e8d6eSSamuel Li *
13346f4e8d6eSSamuel Li * Notifies the driver that we are taking a fault on this BO and have
13356f4e8d6eSSamuel Li * reserved it; also performs bookkeeping.
13366f4e8d6eSSamuel Li * TTM driver callback for dealing with vm faults.
13376f4e8d6eSSamuel Li *
13382472e11bSMichel Dänzer * Returns:
13392472e11bSMichel Dänzer * 0 for success or a negative error code on failure.
13406f4e8d6eSSamuel Li */
1341d3ef581aSChristian König vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
1342d38ceaf9SAlex Deucher {
1343a7d64de6SChristian König struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
134419be5570SChristian König struct ttm_operation_ctx ctx = { false, false };
1345d3ef581aSChristian König struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
134696cf8271SJohn Brooks int r;
1347d38ceaf9SAlex Deucher
134896cf8271SJohn Brooks /* Remember that this BO was accessed by the CPU */
134996cf8271SJohn Brooks abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
135096cf8271SJohn Brooks
1351a6ff969fSChristian König if (amdgpu_res_cpu_visible(adev, bo->resource))
13525fb1941dSChristian König return 0;
13535fb1941dSChristian König
1354104ece97SMichel Dänzer /* Can't move a pinned BO to visible VRAM */
13554671078eSChristian König if (abo->tbo.pin_count > 0)
1356d3ef581aSChristian König return VM_FAULT_SIGBUS;
1357104ece97SMichel Dänzer
1358d38ceaf9SAlex Deucher /* hurrah the memory is not visible ! */
135968e2c5ffSMarek Olšák atomic64_inc(&adev->num_vram_cpu_page_faults);
1360c704ab18SChristian König amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
136141d9a6a7SJohn Brooks AMDGPU_GEM_DOMAIN_GTT);
136241d9a6a7SJohn Brooks
136341d9a6a7SJohn Brooks /* Avoid costly evictions; only set GTT as a busy placement */
1364a78a8da5SSomalapuram Amaranath abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
136541d9a6a7SJohn Brooks
136619be5570SChristian König r = ttm_bo_validate(bo, &abo->placement, &ctx);
1367d3ef581aSChristian König if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
1368d3ef581aSChristian König return VM_FAULT_NOPAGE;
1369d3ef581aSChristian König else if (unlikely(r))
1370d3ef581aSChristian König return VM_FAULT_SIGBUS;
13715fb1941dSChristian König
13725fb1941dSChristian König /* this should never happen */
1373d3116756SChristian König if (bo->resource->mem_type == TTM_PL_VRAM &&
1374a6ff969fSChristian König !amdgpu_res_cpu_visible(adev, bo->resource))
1375d3ef581aSChristian König return VM_FAULT_SIGBUS;
13765fb1941dSChristian König
1377d3ef581aSChristian König ttm_bo_move_to_lru_tail_unlocked(bo);
1378d38ceaf9SAlex Deucher return 0;
1379d38ceaf9SAlex Deucher }
1380d38ceaf9SAlex Deucher
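/*
 * Note (added for clarity): this runs in the CPU page-fault path (see
 * amdgpu_gem_fault() in amdgpu_gem.c) with the BO already reserved; on a
 * contended validation the VM_FAULT_NOPAGE return above makes the faulting
 * access retry instead of killing the process.
 */
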
1381d38ceaf9SAlex Deucher /**
1382d38ceaf9SAlex Deucher * amdgpu_bo_fence - add fence to buffer object
1383d38ceaf9SAlex Deucher *
1384d38ceaf9SAlex Deucher * @bo: buffer object in question
1385d38ceaf9SAlex Deucher * @fence: fence to add
1386d38ceaf9SAlex Deucher * @shared: true if fence should be added shared
1387d38ceaf9SAlex Deucher *
1388d38ceaf9SAlex Deucher */
1389f54d1867SChris Wilson void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
1390d38ceaf9SAlex Deucher bool shared)
1391d38ceaf9SAlex Deucher {
139252791eeeSChristian König struct dma_resv *resv = bo->tbo.base.resv;
1393c8d4c18bSChristian König int r;
1394c8d4c18bSChristian König
1395c8d4c18bSChristian König r = dma_resv_reserve_fences(resv, 1);
1396c8d4c18bSChristian König if (r) {
1397c8d4c18bSChristian König /* As last resort on OOM we block for the fence */
1398c8d4c18bSChristian König dma_fence_wait(fence, false);
1399c8d4c18bSChristian König return;
1400c8d4c18bSChristian König }
1401d38ceaf9SAlex Deucher
140273511edfSChristian König dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ :
140373511edfSChristian König DMA_RESV_USAGE_WRITE);
1404d38ceaf9SAlex Deucher }
1405cdb7e8f2SChristian König
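/*
 * Usage sketch (illustrative): after scheduling GPU work that writes the BO,
 * publish the resulting fence so later users synchronize against it.
 * shared == false maps to DMA_RESV_USAGE_WRITE above, shared == true to
 * DMA_RESV_USAGE_READ; dma_resv_add_fence() takes its own reference, so the
 * caller may drop its local one afterwards:
 *
 *	amdgpu_bo_fence(bo, fence, false);
 *	dma_fence_put(fence);
 */
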
1406cdb7e8f2SChristian König /**
14079f3cc18dSChristian König * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
1408e8e32426SFelix Kuehling *
14099f3cc18dSChristian König * @adev: amdgpu device pointer
14109f3cc18dSChristian König * @resv: reservation object to sync to
14119f3cc18dSChristian König * @sync_mode: synchronization mode
1412e8e32426SFelix Kuehling * @owner: fence owner
1413e8e32426SFelix Kuehling * @intr: Whether the wait is interruptible
1414e8e32426SFelix Kuehling *
14159f3cc18dSChristian König * Extract the fences from the reservation object and waits for them to finish.
14169f3cc18dSChristian König *
14179f3cc18dSChristian König * Returns:
14189f3cc18dSChristian König * 0 on success, errno otherwise.
14199f3cc18dSChristian König */
14209f3cc18dSChristian König int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
14219f3cc18dSChristian König enum amdgpu_sync_mode sync_mode, void *owner,
14229f3cc18dSChristian König bool intr)
14239f3cc18dSChristian König {
14249f3cc18dSChristian König struct amdgpu_sync sync;
14259f3cc18dSChristian König int r;
14269f3cc18dSChristian König
14279f3cc18dSChristian König amdgpu_sync_create(&sync);
14289f3cc18dSChristian König amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
14299f3cc18dSChristian König r = amdgpu_sync_wait(&sync, intr);
14309f3cc18dSChristian König amdgpu_sync_free(&sync);
14319f3cc18dSChristian König return r;
14329f3cc18dSChristian König }
14339f3cc18dSChristian König
14349f3cc18dSChristian König /**
14359f3cc18dSChristian König * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
14369f3cc18dSChristian König * @bo: buffer object to wait for
14379f3cc18dSChristian König * @owner: fence owner
14389f3cc18dSChristian König * @intr: Whether the wait is interruptible
14399f3cc18dSChristian König *
14409f3cc18dSChristian König * Wrapper to wait for fences in a BO.
1441e8e32426SFelix Kuehling * Returns:
1442e8e32426SFelix Kuehling * 0 on success, errno otherwise.
1443e8e32426SFelix Kuehling */
1444e8e32426SFelix Kuehling int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
1445e8e32426SFelix Kuehling {
1446e8e32426SFelix Kuehling struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1447e8e32426SFelix Kuehling
14489f3cc18dSChristian König return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
14499f3cc18dSChristian König AMDGPU_SYNC_NE_OWNER, owner, intr);
1450e8e32426SFelix Kuehling }
1451e8e32426SFelix Kuehling
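/*
 * Usage sketch (mirroring the amdkfd callers of this helper): because the
 * wrapper uses AMDGPU_SYNC_NE_OWNER, this waits for the fences of every
 * owner other than the one given before touching the BO's memory:
 *
 *	r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
 *	if (r)
 *		return r;
 */
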
1452e8e32426SFelix Kuehling /**
1453cdb7e8f2SChristian König * amdgpu_bo_gpu_offset - return GPU offset of bo
1454cdb7e8f2SChristian König * @bo: amdgpu object for which we query the offset
1455cdb7e8f2SChristian König *
1456cdb7e8f2SChristian König * Note: the object should either be pinned or reserved when calling this
1457cdb7e8f2SChristian König * function; it might be useful to add a check for this for debugging.
14582472e11bSMichel Dänzer *
14592472e11bSMichel Dänzer * Returns:
14602472e11bSMichel Dänzer * current GPU offset of the object.
1461cdb7e8f2SChristian König */
1462cdb7e8f2SChristian König u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
1463cdb7e8f2SChristian König {
1464d3116756SChristian König WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
146552791eeeSChristian König WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
14664671078eSChristian König !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
1467d3116756SChristian König WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
1468d3116756SChristian König WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
146903f48dd5SChristian König !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
1470cdb7e8f2SChristian König
1471b1a8ef95SNirmoy Das return amdgpu_bo_gpu_offset_no_check(bo);
1472b1a8ef95SNirmoy Das }
1473b1a8ef95SNirmoy Das
1474b1a8ef95SNirmoy Das /**
1475b1a8ef95SNirmoy Das * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
1476b1a8ef95SNirmoy Das * @bo: amdgpu object for which we query the offset
1477b1a8ef95SNirmoy Das *
1478b1a8ef95SNirmoy Das * Returns:
1479b1a8ef95SNirmoy Das * current GPU offset of the object without raising warnings.
1480b1a8ef95SNirmoy Das */
1481b1a8ef95SNirmoy Das u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
1482b1a8ef95SNirmoy Das {
1483b1a8ef95SNirmoy Das struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1484ca0b0069SAlex Deucher uint64_t offset = AMDGPU_BO_INVALID_OFFSET;
1485b1a8ef95SNirmoy Das
1486ca0b0069SAlex Deucher if (bo->tbo.resource->mem_type == TTM_PL_TT)
1487ca0b0069SAlex Deucher offset = amdgpu_gmc_agp_addr(&bo->tbo);
1488ca0b0069SAlex Deucher
1489ca0b0069SAlex Deucher if (offset == AMDGPU_BO_INVALID_OFFSET)
1490d3116756SChristian König offset = (bo->tbo.resource->start << PAGE_SHIFT) +
1491d3116756SChristian König amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
1492b1a8ef95SNirmoy Das
1493b1a8ef95SNirmoy Das return amdgpu_gmc_sign_extend(offset);
1494cdb7e8f2SChristian König }
149584b74608SDeepak Sharma
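/*
 * Usage sketch (illustrative): the "pinned or reserved" rule documented on
 * amdgpu_bo_gpu_offset() leads to the classic pin-then-query pattern, with a
 * matching amdgpu_bo_unpin() on teardown:
 *
 *	u64 gpu_addr;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	if (!r)
 *		gpu_addr = amdgpu_bo_gpu_offset(bo);
 *	amdgpu_bo_unreserve(bo);
 */
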
14962472e11bSMichel Dänzer /**
149774ef9527SYunxiang Li * amdgpu_bo_mem_stats_placement - bo placement for memory accounting
149874ef9527SYunxiang Li * @bo: the buffer object we should look at
149974ef9527SYunxiang Li *
150074ef9527SYunxiang Li * A BO can have multiple preferred placements; to avoid double counting we
150174ef9527SYunxiang Li * file it under a single placement for memory stats.
150274ef9527SYunxiang Li * Luckily, if we take the highest set bit in preferred_domains the result is
150374ef9527SYunxiang Li * quite sensible.
150474ef9527SYunxiang Li *
150574ef9527SYunxiang Li * Returns:
150674ef9527SYunxiang Li * Which of the placements should the BO be accounted under.
150774ef9527SYunxiang Li */
150874ef9527SYunxiang Li uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo)
150974ef9527SYunxiang Li {
151074ef9527SYunxiang Li uint32_t domain = bo->preferred_domains & AMDGPU_GEM_DOMAIN_MASK;
151174ef9527SYunxiang Li
151274ef9527SYunxiang Li if (!domain)
151374ef9527SYunxiang Li return TTM_PL_SYSTEM;
151474ef9527SYunxiang Li
151574ef9527SYunxiang Li switch (rounddown_pow_of_two(domain)) {
151674ef9527SYunxiang Li case AMDGPU_GEM_DOMAIN_CPU:
151774ef9527SYunxiang Li return TTM_PL_SYSTEM;
151874ef9527SYunxiang Li case AMDGPU_GEM_DOMAIN_GTT:
151974ef9527SYunxiang Li return TTM_PL_TT;
152074ef9527SYunxiang Li case AMDGPU_GEM_DOMAIN_VRAM:
152174ef9527SYunxiang Li return TTM_PL_VRAM;
152274ef9527SYunxiang Li case AMDGPU_GEM_DOMAIN_GDS:
152374ef9527SYunxiang Li return AMDGPU_PL_GDS;
152474ef9527SYunxiang Li case AMDGPU_GEM_DOMAIN_GWS:
152574ef9527SYunxiang Li return AMDGPU_PL_GWS;
152674ef9527SYunxiang Li case AMDGPU_GEM_DOMAIN_OA:
152774ef9527SYunxiang Li return AMDGPU_PL_OA;
152874ef9527SYunxiang Li case AMDGPU_GEM_DOMAIN_DOORBELL:
152974ef9527SYunxiang Li return AMDGPU_PL_DOORBELL;
153074ef9527SYunxiang Li default:
153174ef9527SYunxiang Li return TTM_PL_SYSTEM;
153274ef9527SYunxiang Li }
153374ef9527SYunxiang Li }
153474ef9527SYunxiang Li
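/*
 * Worked example (for the accounting rule above): a BO created with
 * preferred_domains = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT has
 * VRAM as its highest set bit, so rounddown_pow_of_two() selects VRAM and
 * the BO is counted exactly once, under TTM_PL_VRAM.
 */
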
153574ef9527SYunxiang Li /**
1536d035f84dSYifan Zhang * amdgpu_bo_get_preferred_domain - get preferred domain
15372472e11bSMichel Dänzer * @adev: amdgpu device object
15382472e11bSMichel Dänzer * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
15392472e11bSMichel Dänzer *
15402472e11bSMichel Dänzer * Returns:
1541d035f84dSYifan Zhang * Which of the allowed domains is preferred for allocating the BO.
15422472e11bSMichel Dänzer */
1543d035f84dSYifan Zhang uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
154484b74608SDeepak Sharma uint32_t domain)
154584b74608SDeepak Sharma {
154681d0bcf9SAlex Deucher if ((domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) &&
154781d0bcf9SAlex Deucher ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY))) {
154884b74608SDeepak Sharma domain = AMDGPU_GEM_DOMAIN_VRAM;
154984b74608SDeepak Sharma if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
155084b74608SDeepak Sharma domain = AMDGPU_GEM_DOMAIN_GTT;
155184b74608SDeepak Sharma }
155284b74608SDeepak Sharma return domain;
155384b74608SDeepak Sharma }
1554ff72bc40SMihir Bhogilal Patel
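/*
 * Worked example (for the quirk above): on CHIP_CARRIZO or CHIP_STONEY with
 * a VRAM carve-out no larger than AMDGPU_SG_THRESHOLD, a request for
 * VRAM | GTT is narrowed to GTT only; on every other ASIC, or for any other
 * domain mask, the mask is returned unchanged.
 */
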
1555ff72bc40SMihir Bhogilal Patel #if defined(CONFIG_DEBUG_FS)
1556ff72bc40SMihir Bhogilal Patel #define amdgpu_bo_print_flag(m, bo, flag) \
1557ff72bc40SMihir Bhogilal Patel do { \
1558ff72bc40SMihir Bhogilal Patel if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) { \
1559ff72bc40SMihir Bhogilal Patel seq_printf((m), " " #flag); \
1560ff72bc40SMihir Bhogilal Patel } \
1561ff72bc40SMihir Bhogilal Patel } while (0)
1562ff72bc40SMihir Bhogilal Patel
1563ff72bc40SMihir Bhogilal Patel /**
156425dd7a44SMauro Carvalho Chehab * amdgpu_bo_print_info - print BO info in debugfs file
1565ff72bc40SMihir Bhogilal Patel *
1566ff72bc40SMihir Bhogilal Patel * @id: Index or Id of the BO
1567ff72bc40SMihir Bhogilal Patel * @bo: Requested BO for printing info
1568ff72bc40SMihir Bhogilal Patel * @m: debugfs file
1569ff72bc40SMihir Bhogilal Patel *
1570ff72bc40SMihir Bhogilal Patel * Print BO information in debugfs file
1571ff72bc40SMihir Bhogilal Patel *
1572ff72bc40SMihir Bhogilal Patel * Returns:
1573ff72bc40SMihir Bhogilal Patel * Size of the BO in bytes.
1574ff72bc40SMihir Bhogilal Patel */
1575ff72bc40SMihir Bhogilal Patel u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
1576ff72bc40SMihir Bhogilal Patel {
1577a6ff969fSChristian König struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1578ff72bc40SMihir Bhogilal Patel struct dma_buf_attachment *attachment;
1579ff72bc40SMihir Bhogilal Patel struct dma_buf *dma_buf;
1580ff72bc40SMihir Bhogilal Patel const char *placement;
1581ff72bc40SMihir Bhogilal Patel unsigned int pin_count;
1582ff72bc40SMihir Bhogilal Patel u64 size;
1583ff72bc40SMihir Bhogilal Patel
1584818c158fSPierre-Eric Pelloux-Prayer if (dma_resv_trylock(bo->tbo.base.resv)) {
1585c71c9aafSPierre-Eric Pelloux-Prayer if (!bo->tbo.resource) {
1586c71c9aafSPierre-Eric Pelloux-Prayer placement = "NONE";
1587c71c9aafSPierre-Eric Pelloux-Prayer } else {
15888fb0efb1STvrtko Ursulin switch (bo->tbo.resource->mem_type) {
15898fb0efb1STvrtko Ursulin case TTM_PL_VRAM:
1590a6ff969fSChristian König if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
1591818c158fSPierre-Eric Pelloux-Prayer placement = "VRAM VISIBLE";
1592818c158fSPierre-Eric Pelloux-Prayer else
1593ff72bc40SMihir Bhogilal Patel placement = "VRAM";
1594ff72bc40SMihir Bhogilal Patel break;
15958fb0efb1STvrtko Ursulin case TTM_PL_TT:
1596ff72bc40SMihir Bhogilal Patel placement = "GTT";
1597ff72bc40SMihir Bhogilal Patel break;
159850bff04dSTvrtko Ursulin case AMDGPU_PL_GDS:
159950bff04dSTvrtko Ursulin placement = "GDS";
160050bff04dSTvrtko Ursulin break;
160150bff04dSTvrtko Ursulin case AMDGPU_PL_GWS:
160250bff04dSTvrtko Ursulin placement = "GWS";
160350bff04dSTvrtko Ursulin break;
160450bff04dSTvrtko Ursulin case AMDGPU_PL_OA:
160550bff04dSTvrtko Ursulin placement = "OA";
160650bff04dSTvrtko Ursulin break;
160750bff04dSTvrtko Ursulin case AMDGPU_PL_PREEMPT:
160850bff04dSTvrtko Ursulin placement = "PREEMPTIBLE";
160950bff04dSTvrtko Ursulin break;
161050bff04dSTvrtko Ursulin case AMDGPU_PL_DOORBELL:
161150bff04dSTvrtko Ursulin placement = "DOORBELL";
161250bff04dSTvrtko Ursulin break;
16138fb0efb1STvrtko Ursulin case TTM_PL_SYSTEM:
1614ff72bc40SMihir Bhogilal Patel default:
1615ff72bc40SMihir Bhogilal Patel placement = "CPU";
1616ff72bc40SMihir Bhogilal Patel break;
1617ff72bc40SMihir Bhogilal Patel }
1618c71c9aafSPierre-Eric Pelloux-Prayer }
1619818c158fSPierre-Eric Pelloux-Prayer dma_resv_unlock(bo->tbo.base.resv);
1620818c158fSPierre-Eric Pelloux-Prayer } else {
1621818c158fSPierre-Eric Pelloux-Prayer placement = "UNKNOWN";
1622818c158fSPierre-Eric Pelloux-Prayer }
1623ff72bc40SMihir Bhogilal Patel
1624ff72bc40SMihir Bhogilal Patel size = amdgpu_bo_size(bo);
1625ff72bc40SMihir Bhogilal Patel seq_printf(m, "\t\t0x%08x: %12lld byte %s",
1626ff72bc40SMihir Bhogilal Patel id, size, placement);
1627ff72bc40SMihir Bhogilal Patel
16285b8c5969SDave Airlie pin_count = READ_ONCE(bo->tbo.pin_count);
1629ff72bc40SMihir Bhogilal Patel if (pin_count)
1630ff72bc40SMihir Bhogilal Patel seq_printf(m, " pin count %d", pin_count);
1631ff72bc40SMihir Bhogilal Patel
1632ff72bc40SMihir Bhogilal Patel dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
1633ff72bc40SMihir Bhogilal Patel attachment = READ_ONCE(bo->tbo.base.import_attach);
1634ff72bc40SMihir Bhogilal Patel
1635ff72bc40SMihir Bhogilal Patel if (attachment)
163626fd808bSPierre-Eric Pelloux-Prayer seq_printf(m, " imported from ino:%lu", file_inode(dma_buf->file)->i_ino);
1637ff72bc40SMihir Bhogilal Patel else if (dma_buf)
163826fd808bSPierre-Eric Pelloux-Prayer seq_printf(m, " exported as ino:%lu", file_inode(dma_buf->file)->i_ino);
1639ff72bc40SMihir Bhogilal Patel
1640ff72bc40SMihir Bhogilal Patel amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
1641ff72bc40SMihir Bhogilal Patel amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
1642ff72bc40SMihir Bhogilal Patel amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC);
1643ff72bc40SMihir Bhogilal Patel amdgpu_bo_print_flag(m, bo, VRAM_CLEARED);
1644ff72bc40SMihir Bhogilal Patel amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
1645ff72bc40SMihir Bhogilal Patel amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
1646ff72bc40SMihir Bhogilal Patel amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);
1647ff72bc40SMihir Bhogilal Patel
1648ff72bc40SMihir Bhogilal Patel seq_puts(m, "\n");
1649ff72bc40SMihir Bhogilal Patel
1650ff72bc40SMihir Bhogilal Patel return size;
1651ff72bc40SMihir Bhogilal Patel }
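
/*
 * Illustrative output line (format follows the seq_printf() calls above;
 * the values are made up):
 *
 *	0x00000003:      2097152 byte VRAM pin count 1 CPU_ACCESS_REQUIRED
 */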
1652ff72bc40SMihir Bhogilal Patel #endif
1653