// SPDX-License-Identifier: MIT
/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/drm_exec.h>

#include "amdgpu_object.h"
#include "amdgpu_gem.h"
#include "amdgpu_vm.h"
#include "amdgpu_hmm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
#include <uapi/linux/kfd_ioctl.h>
#include "amdgpu_xgmi.h"
#include "kfd_priv.h"
#include "kfd_smi_events.h"

/* Userptr restore delay, just long enough to allow consecutive VM
 * changes to accumulate
 */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS	1
#define AMDGPU_RESERVE_MEM_LIMIT	(3UL << 29)

/*
 * Align VRAM availability to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
 * BO chunk
 */
#define VRAM_AVAILABLITY_ALIGN	(1 << 21)
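
/* For example, with VRAM_AVAILABLITY_ALIGN = 2 MiB, a 4 KiB allocation is
 * accounted against availability as ALIGN(4096, 1 << 21) == 2 MiB, so a run
 * of small BOs cannot whittle the tail of a 2 MiB chunk down to an unusable
 * remainder.
 */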

/* Impose limit on how much memory KFD can use */
static struct {
	uint64_t max_system_mem_limit;
	uint64_t max_ttm_mem_limit;
	int64_t system_mem_used;
	int64_t ttm_mem_used;
	spinlock_t mem_limit_lock;
} kfd_mem_limit;

static const char * const domain_bit_to_string[] = {
	"CPU",
	"GTT",
	"VRAM",
	"GDS",
	"GWS",
	"OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
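
/* e.g. domain_string(AMDGPU_GEM_DOMAIN_VRAM) evaluates to "VRAM":
 * AMDGPU_GEM_DOMAIN_VRAM is bit 2 (0x4), and ffs(0x4) - 1 == 2.
 */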

static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);

static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
				struct kgd_mem *mem)
{
	struct kfd_mem_attachment *entry;

	list_for_each_entry(entry, &mem->attachments, list)
		if (entry->bo_va->base.vm == avm)
			return true;

	return false;
}

/**
 * reuse_dmamap() - Check whether adev can share the original
 * userptr BO
 *
 * If both adev and bo_adev are in direct mapping or
 * in the same iommu group, they can share the original BO.
 *
 * @adev: Device that wants to access the original BO
 * @bo_adev: Device to which the allocated BO belongs
 *
 * Return: true if adev can share the original userptr BO,
 * false otherwise.
 */
static bool reuse_dmamap(struct amdgpu_device *adev, struct amdgpu_device *bo_adev)
{
	return (adev->ram_is_direct_mapped && bo_adev->ram_is_direct_mapped) ||
		(adev->dev->iommu_group == bo_adev->dev->iommu_group);
}

/* Set memory usage limits. Currently, the limits are
 *  System (TTM + userptr) memory - all but 1/64th of system RAM, minus a
 *  fixed reservation (halved instead on very small systems)
 *  TTM memory - the TTM pages limit (ttm_tt_pages_limit())
 */
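/* Worked example (illustrative numbers): on a 16 GiB system,
 * max_system_mem_limit = 16 GiB - (16 GiB >> 6) - AMDGPU_RESERVE_MEM_LIMIT
 *                      = 15.75 GiB - 1.5 GiB = 14.25 GiB.
 * Only when the 1/64th-reduced total is below twice the 1.5 GiB reservation
 * is the limit halved instead.
 */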
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
	struct sysinfo si;
	uint64_t mem;

	if (kfd_mem_limit.max_system_mem_limit)
		return;

	si_meminfo(&si);
	mem = si.totalram - si.totalhigh;
	mem *= si.mem_unit;

	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 6);
	if (kfd_mem_limit.max_system_mem_limit < 2 * AMDGPU_RESERVE_MEM_LIMIT)
		kfd_mem_limit.max_system_mem_limit >>= 1;
	else
		kfd_mem_limit.max_system_mem_limit -= AMDGPU_RESERVE_MEM_LIMIT;

	kfd_mem_limit.max_ttm_mem_limit = ttm_tt_pages_limit() << PAGE_SHIFT;
	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
		(kfd_mem_limit.max_system_mem_limit >> 20),
		(kfd_mem_limit.max_ttm_mem_limit >> 20));
}

void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
{
	kfd_mem_limit.system_mem_used += size;
}

/* Estimate page table size needed to represent a given memory size
 *
 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
 * for 2MB pages for TLB efficiency. However, small allocations and
 * fragmented system memory still need some 4KB pages. We choose a
 * compromise that should work in most cases without reserving too
 * much memory for page tables unnecessarily (factor 16K, >> 14).
 */

#define ESTIMATE_PT_SIZE(mem_size) max(((mem_size) >> 14), AMDGPU_VM_RESERVED_VRAM)
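
/* e.g. ESTIMATE_PT_SIZE(64ULL << 30) == max(4 MiB, AMDGPU_VM_RESERVED_VRAM),
 * since 64 GiB >> 14 is 4 MiB of estimated page-table backing.
 */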

/**
 * amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by size
 * of buffer.
 *
 * @adev: Device to which allocated BO belongs to
 * @size: Size of buffer, in bytes, encapsulated by BO. This should be
 * equivalent to amdgpu_bo_size(BO)
 * @alloc_flag: Flag used in allocating a BO as noted above
 * @xcp_id: xcp_id is used to get xcp from xcp manager, one xcp is
 * managed as one compute node in driver for app
 *
 * Return:
 * returns -EINVAL for an invalid @xcp_id, -ENOMEM when a memory limit
 * would be exceeded, ZERO otherwise
 */
int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag, int8_t xcp_id)
{
	uint64_t reserved_for_pt =
		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	uint64_t reserved_for_ras = (con ? con->reserved_pages_in_bytes : 0);
	size_t system_mem_needed, ttm_mem_needed, vram_needed;
	int ret = 0;
	uint64_t vram_size = 0;

	system_mem_needed = 0;
	ttm_mem_needed = 0;
	vram_needed = 0;
	if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		system_mem_needed = size;
		ttm_mem_needed = size;
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		/*
		 * Conservatively round up the allocation requirement to 2 MB
		 * to avoid fragmentation caused by 4K allocations in the tail
		 * 2M BO chunk.
		 */
		vram_needed = size;
		/*
		 * For GFX 9.4.3, get the VRAM size from XCP structs
		 */
		if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id))
			return -EINVAL;

		vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
		if (adev->apu_prefer_gtt) {
			system_mem_needed = size;
			ttm_mem_needed = size;
		}
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
		system_mem_needed = size;
	} else if (!(alloc_flag &
				(KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
				 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
		pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
		return -ENOMEM;
	}

	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if (kfd_mem_limit.system_mem_used + system_mem_needed >
	    kfd_mem_limit.max_system_mem_limit)
		pr_debug("Set no_system_mem_limit=1 if using shared memory\n");

	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
	     kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
	     kfd_mem_limit.max_ttm_mem_limit) ||
	    (adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed >
	     vram_size - reserved_for_pt - reserved_for_ras - atomic64_read(&adev->vram_pin_size))) {
		ret = -ENOMEM;
		goto release;
	}

	/* Update memory accounting by decreasing available system
	 * memory, TTM memory and GPU memory as computed above
	 */
	WARN_ONCE(vram_needed && !adev,
		  "adev reference can't be null when vram is used");
	if (adev && xcp_id >= 0) {
		adev->kfd.vram_used[xcp_id] += vram_needed;
		adev->kfd.vram_used_aligned[xcp_id] +=
				adev->apu_prefer_gtt ?
				vram_needed :
				ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
	}
	kfd_mem_limit.system_mem_used += system_mem_needed;
	kfd_mem_limit.ttm_mem_used += ttm_mem_needed;

release:
	spin_unlock(&kfd_mem_limit.mem_limit_lock);
	return ret;
}

void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag, int8_t xcp_id)
{
	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		kfd_mem_limit.system_mem_used -= size;
		kfd_mem_limit.ttm_mem_used -= size;
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		WARN_ONCE(!adev,
			  "adev reference can't be null when alloc mem flags vram is set");
		if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id))
			goto release;

		if (adev) {
			adev->kfd.vram_used[xcp_id] -= size;
			if (adev->apu_prefer_gtt) {
				adev->kfd.vram_used_aligned[xcp_id] -= size;
				kfd_mem_limit.system_mem_used -= size;
				kfd_mem_limit.ttm_mem_used -= size;
			} else {
				adev->kfd.vram_used_aligned[xcp_id] -=
					ALIGN(size, VRAM_AVAILABLITY_ALIGN);
			}
		}
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
		kfd_mem_limit.system_mem_used -= size;
	} else if (!(alloc_flag &
				(KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
				 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
		pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
		goto release;
	}
	WARN_ONCE(adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] < 0,
		  "KFD VRAM memory accounting unbalanced for xcp: %d", xcp_id);
	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
		  "KFD TTM memory accounting unbalanced");
	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
		  "KFD system memory accounting unbalanced");

release:
	spin_unlock(&kfd_mem_limit.mem_limit_lock);
}

void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	u32 alloc_flags = bo->kfd_bo->alloc_flags;
	u64 size = amdgpu_bo_size(bo);

	amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags,
					  bo->xcp_id);

	kfree(bo->kfd_bo);
}

/**
 * create_dmamap_sg_bo() - Creates an amdgpu_bo object to reflect information
 * about USERPTR or DOORBELL or MMIO BO.
 *
 * @adev: Device for which dmamap BO is being created
 * @mem: BO of peer device that is being DMA mapped. Provides parameters
 *	 in building the dmamap BO
 * @bo_out: Output parameter updated with handle of dmamap BO
 */
static int
create_dmamap_sg_bo(struct amdgpu_device *adev,
		    struct kgd_mem *mem, struct amdgpu_bo **bo_out)
{
	struct drm_gem_object *gem_obj;
	int ret;
	uint64_t flags = 0;

	ret = amdgpu_bo_reserve(mem->bo, false);
	if (ret)
		return ret;

	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)
		flags |= mem->bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
					   AMDGPU_GEM_CREATE_UNCACHED);

	ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1,
			AMDGPU_GEM_DOMAIN_CPU, AMDGPU_GEM_CREATE_PREEMPTIBLE | flags,
			ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj, 0);

	amdgpu_bo_unreserve(mem->bo);

	if (ret) {
		pr_err("Error in creating DMA mappable SG BO on domain: %d\n", ret);
		return -EINVAL;
	}

	*bo_out = gem_to_amdgpu_bo(gem_obj);
	(*bo_out)->parent = amdgpu_bo_ref(mem->bo);
	return ret;
}

/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
 *  reservation object.
 *
 * @bo: [IN] Remove eviction fence(s) from this BO
 * @ef: [IN] This eviction fence is removed if it
 *  is present in the shared list.
 *
 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
 */
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
					struct amdgpu_amdkfd_fence *ef)
{
	struct dma_fence *replacement;

	if (!ef)
		return -EINVAL;

	/* TODO: Instead of blocking before this point, we should use the
	 * fence of the page table update and TLB flush here directly.
	 */
	replacement = dma_fence_get_stub();
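	/* The stub fence is already signaled, so replacing the eviction
	 * fence with it releases waiters on that fence context without
	 * leaving a hole in the reservation object.
	 */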
	dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
				replacement, DMA_RESV_USAGE_BOOKKEEP);
	dma_fence_put(replacement);
	return 0;
}

/**
 * amdgpu_amdkfd_remove_all_eviction_fences - Remove all eviction fences
 * @bo: the BO to remove the eviction fences from.
 *
 * This function should only be used on release when all references to the BO
 * are already dropped. We remove the eviction fence from the private copy of
 * the dma_resv object here since that is what is used during release to
 * determine whether the BO is idle or not.
 */
void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo)
{
	struct dma_resv *resv = &bo->tbo.base._resv;
	struct dma_fence *fence, *stub;
	struct dma_resv_iter cursor;

	dma_resv_assert_held(resv);

	stub = dma_fence_get_stub();
	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
		if (!to_amdgpu_amdkfd_fence(fence))
			continue;

		dma_resv_replace_fences(resv, fence->context, stub,
					DMA_RESV_USAGE_BOOKKEEP);
	}
	dma_fence_put(stub);
}

static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
				     bool wait)
{
	struct ttm_operation_ctx ctx = { false, false };
	int ret;

	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
		 "Called with userptr BO"))
		return -EINVAL;

	/* bo has been pinned, no need to validate it */
	if (bo->tbo.pin_count)
		return 0;

	amdgpu_bo_placement_from_domain(bo, domain);

	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto validate_fail;
	if (wait)
		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);

validate_fail:
	return ret;
}

int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
					uint32_t domain,
					struct dma_fence *fence)
{
	int ret = amdgpu_bo_reserve(bo, false);

	if (ret)
		return ret;

	ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
	if (ret)
		goto unreserve_out;

	ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
	if (ret)
		goto unreserve_out;

	dma_resv_add_fence(bo->tbo.base.resv, fence,
			   DMA_RESV_USAGE_BOOKKEEP);

unreserve_out:
	amdgpu_bo_unreserve(bo);

	return ret;
}

static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
{
	return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
}

/* vm_validate_pt_pd_bos - Validate page table and directory BOs
 *
 * Page directories are not updated here because huge page handling
 * during page table updates can invalidate page directory entries
 * again. Page directories are only updated after updating page
 * tables.
 */
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm,
				 struct ww_acquire_ctx *ticket)
{
	struct amdgpu_bo *pd = vm->root.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_validate(adev, vm, ticket,
				 amdgpu_amdkfd_validate_vm_bo, NULL);
	if (ret) {
		pr_err("failed to validate PT BOs\n");
		return ret;
	}

	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);

	return 0;
}

static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo *pd = vm->root.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_update_pdes(adev, vm, false);
	if (ret)
		return ret;

	return amdgpu_sync_fence(sync, vm->last_update, GFP_KERNEL);
}

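/* Translate KFD allocation flags into VM mapping flags. For example, an
 * allocation with KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE and
 * KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE maps readable, writeable and
 * executable; the ASIC-specific PTE encoding is left to
 * amdgpu_gem_va_map_flags().
 */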
static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
{
	uint32_t mapping_flags = AMDGPU_VM_PAGE_READABLE |
				 AMDGPU_VM_MTYPE_DEFAULT;

	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;

	return amdgpu_gem_va_map_flags(adev, mapping_flags);
}

/**
 * create_sg_table() - Create an sg_table for a contiguous DMA addr range
 * @addr: The starting address to point to
 * @size: Size of memory area in bytes being pointed to
 *
 * Allocates an instance of sg_table and initializes it to point to memory
 * area specified by input parameters. The address used to build is assumed
 * to be DMA mapped, if needed.
 *
 * DOORBELL or MMIO BOs use only one scatterlist node in their sg_table
 * because they are physically contiguous.
 *
 * Return: Initialized instance of SG Table or NULL
 */
static struct sg_table *create_sg_table(uint64_t addr, uint32_t size)
{
	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);

	if (!sg)
		return NULL;
	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
		kfree(sg);
		return NULL;
	}
	sg_dma_address(sg->sgl) = addr;
	sg->sgl->length = size;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
	sg->sgl->dma_length = size;
#endif
	return sg;
}
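
/* Typical use (as in kfd_mem_dmamap_sg_bo() below): wrap an already
 * DMA-mapped, physically contiguous range so TTM can consume it, e.g.
 *	ttm->sg = create_sg_table(dma_addr, len);
 */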

static int
kfd_mem_dmamap_userptr(struct kgd_mem *mem,
		       struct kfd_mem_attachment *attachment)
{
	enum dma_data_direction direction =
		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
	struct ttm_tt *ttm = bo->tbo.ttm;
	int ret;

	if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
		return -EINVAL;

	ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
	if (unlikely(!ttm->sg))
		return -ENOMEM;

	/* Same sequence as in amdgpu_ttm_tt_pin_userptr */
	ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
					ttm->num_pages, 0,
					(u64)ttm->num_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (unlikely(ret))
		goto free_sg;

	ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
	if (unlikely(ret))
		goto release_sg;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto unmap_sg;

	return 0;

unmap_sg:
	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
release_sg:
	pr_err("DMA map userptr failed: %d\n", ret);
	sg_free_table(ttm->sg);
free_sg:
	kfree(ttm->sg);
	ttm->sg = NULL;
	return ret;
}

static int
kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
{
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

/**
 * kfd_mem_dmamap_sg_bo() - Create DMA mapped sg_table to access DOORBELL or MMIO BO
 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
 * @attachment: Virtual address attachment of the BO on accessing device
 *
 * An access request from the device that owns the DOORBELL does not require
 * DMA mapping. This is because the request doesn't go through the PCIe root
 * complex, i.e. it loops back inside the device. The need to DMA map arises
 * only when accessing a peer device's DOORBELL.
 *
 * In contrast, all access requests for MMIO need to be DMA mapped without
 * regard to device ownership. This is because access requests for MMIO go
 * through the PCIe root complex.
 *
 * This is accomplished in two steps:
 *   - Obtain DMA mapped address of DOORBELL or MMIO memory that could be used
 *     in updating requesting device's page table
 *   - Signal TTM to mark memory pointed to by requesting device's BO as GPU
 *     accessible. This allows an update of requesting device's page table
 *     with entries associated with DOORBELL or MMIO memory
 *
 * This method is invoked in the following contexts:
 *   - Mapping of DOORBELL or MMIO BO of same or peer device
 *   - Validating an evicted DOORBELL or MMIO BO on device seeking access
 *
 * Return: ZERO if successful, NON-ZERO otherwise
 */
static int
kfd_mem_dmamap_sg_bo(struct kgd_mem *mem,
		     struct kfd_mem_attachment *attachment)
{
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *ttm = bo->tbo.ttm;
	enum dma_data_direction dir;
	dma_addr_t dma_addr;
	bool mmio;
	int ret;

	/* Expect SG Table of dmamap BO to be NULL */
	mmio = (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP);
	if (unlikely(ttm->sg)) {
		pr_err("SG Table of %d BO for peer device is UNEXPECTEDLY NON-NULL", mmio);
		return -EINVAL;
	}

	dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
			DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	dma_addr = mem->bo->tbo.sg->sgl->dma_address;
	pr_debug("%d BO size: %d\n", mmio, mem->bo->tbo.sg->sgl->length);
	pr_debug("%d BO address before DMA mapping: %llx\n", mmio, dma_addr);
	dma_addr = dma_map_resource(adev->dev, dma_addr,
			mem->bo->tbo.sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC);
	ret = dma_mapping_error(adev->dev, dma_addr);
	if (unlikely(ret))
		return ret;
	pr_debug("%d BO address after DMA mapping: %llx\n", mmio, dma_addr);

	ttm->sg = create_sg_table(dma_addr, mem->bo->tbo.sg->sgl->length);
	if (unlikely(!ttm->sg)) {
		ret = -ENOMEM;
		goto unmap_sg;
	}

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(ret))
		goto free_sg;

	return ret;

free_sg:
	sg_free_table(ttm->sg);
	kfree(ttm->sg);
	ttm->sg = NULL;
unmap_sg:
	dma_unmap_resource(adev->dev, dma_addr, mem->bo->tbo.sg->sgl->length,
			   dir, DMA_ATTR_SKIP_CPU_SYNC);
	return ret;
}

static int
kfd_mem_dmamap_attachment(struct kgd_mem *mem,
			  struct kfd_mem_attachment *attachment)
{
	switch (attachment->type) {
	case KFD_MEM_ATT_SHARED:
		return 0;
	case KFD_MEM_ATT_USERPTR:
		return kfd_mem_dmamap_userptr(mem, attachment);
	case KFD_MEM_ATT_DMABUF:
		return kfd_mem_dmamap_dmabuf(attachment);
	case KFD_MEM_ATT_SG:
		return kfd_mem_dmamap_sg_bo(mem, attachment);
	default:
		WARN_ON_ONCE(1);
	}
	return -EINVAL;
}

static void
kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
			 struct kfd_mem_attachment *attachment)
{
	enum dma_data_direction direction =
		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	struct ttm_operation_ctx ctx = {.interruptible = false};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *ttm = bo->tbo.ttm;

	if (unlikely(!ttm->sg))
		return;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	(void)ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
	sg_free_table(ttm->sg);
	kfree(ttm->sg);
	ttm->sg = NULL;
}

static void
kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
{
	/* This is a no-op. We don't want to trigger eviction fences when
	 * unmapping DMABufs. Therefore the invalidation (moving to system
	 * domain) is done in kfd_mem_dmamap_dmabuf.
	 */
}

/**
 * kfd_mem_dmaunmap_sg_bo() - Free DMA mapped sg_table of DOORBELL or MMIO BO
 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
 * @attachment: Virtual address attachment of the BO on accessing device
 *
 * The method performs following steps:
 *   - Signal TTM to mark memory pointed to by BO as GPU inaccessible
 *   - Free SG Table that is used to encapsulate DMA mapped memory of
 *     peer device's DOORBELL or MMIO memory
 *
 * This method is invoked in the following contexts:
 *   Unmapping of DOORBELL or MMIO BO on a device having access to its memory
 *   Eviction of DOORBELL or MMIO BO on device having access to its memory
 *
 * Return: void
 */
static void
kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem,
		       struct kfd_mem_attachment *attachment)
{
	struct ttm_operation_ctx ctx = {.interruptible = true};
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
	struct amdgpu_device *adev = attachment->adev;
	struct ttm_tt *ttm = bo->tbo.ttm;
	enum dma_data_direction dir;

	if (unlikely(!ttm->sg)) {
		pr_debug("SG Table of BO is NULL");
		return;
	}

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	(void)ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	dma_unmap_resource(adev->dev, ttm->sg->sgl->dma_address,
			   ttm->sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(ttm->sg);
	kfree(ttm->sg);
	ttm->sg = NULL;
	bo->tbo.sg = NULL;
}

static void
kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
			    struct kfd_mem_attachment *attachment)
{
	switch (attachment->type) {
	case KFD_MEM_ATT_SHARED:
		break;
	case KFD_MEM_ATT_USERPTR:
		kfd_mem_dmaunmap_userptr(mem, attachment);
		break;
	case KFD_MEM_ATT_DMABUF:
		kfd_mem_dmaunmap_dmabuf(attachment);
		break;
	case KFD_MEM_ATT_SG:
		kfd_mem_dmaunmap_sg_bo(mem, attachment);
		break;
	default:
		WARN_ON_ONCE(1);
	}
}

static int kfd_mem_export_dmabuf(struct kgd_mem *mem)
{
	if (!mem->dmabuf) {
		struct amdgpu_device *bo_adev;
		struct dma_buf *dmabuf;

		bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
		dmabuf = drm_gem_prime_handle_to_dmabuf(&bo_adev->ddev, bo_adev->kfd.client.file,
							mem->gem_handle,
							mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
							DRM_RDWR : 0);
		if (IS_ERR(dmabuf))
			return PTR_ERR(dmabuf);
		mem->dmabuf = dmabuf;
	}

	return 0;
}

static int
kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
		      struct amdgpu_bo **bo)
{
	struct drm_gem_object *gobj;
	int ret;

	ret = kfd_mem_export_dmabuf(mem);
	if (ret)
		return ret;

	gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
	if (IS_ERR(gobj))
		return PTR_ERR(gobj);

	*bo = gem_to_amdgpu_bo(gobj);
	(*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;

	return 0;
}

/* kfd_mem_attach - Add a BO to a VM
 *
 * Everything that needs to be done only once when a BO is first added
 * to a VM. It can later be mapped and unmapped many times without
 * repeating these steps.
 *
 * 0. Create BO for DMA mapping, if needed
 * 1. Allocate and initialize BO VA entry data structure
 * 2. Add BO to the VM
 * 3. Determine ASIC-specific PTE flags
 * 4. Alloc page tables and directories if needed
 * 4a. Validate new page tables and directories
 */
static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
		struct amdgpu_vm *vm, bool is_aql)
{
	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
	unsigned long bo_size = mem->bo->tbo.base.size;
	uint64_t va = mem->va;
	struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
	struct amdgpu_bo *bo[2] = {NULL, NULL};
	struct amdgpu_bo_va *bo_va;
	bool same_hive = false;
	int i, ret;

	if (!va) {
		pr_err("Invalid VA when adding BO to VM\n");
		return -EINVAL;
	}

	/* Determine access to VRAM, MMIO and DOORBELL BOs of peer devices
	 *
	 * The access path of MMIO and DOORBELL BOs is always over PCIe.
	 * In contrast, the access path of VRAM BOs depends upon the type of
	 * link that connects the peer device. Access over PCIe is allowed
	 * if the peer device has a large BAR. In contrast, access over xGMI
	 * is allowed for both small and large BAR configurations of the
	 * peer device
	 */
	if ((adev != bo_adev && !adev->apu_prefer_gtt) &&
	    ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
	     (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
	     (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
		if (mem->domain == AMDGPU_GEM_DOMAIN_VRAM)
			same_hive = amdgpu_xgmi_same_hive(adev, bo_adev);
		if (!same_hive && !amdgpu_device_is_peer_accessible(bo_adev, adev))
			return -EINVAL;
	}

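	/* One attachment is created per mapping; AQL allocations (is_aql) get
	 * a second, doubled mapping at va + bo_size on the same GPU, hence up
	 * to two attachments per call. The attachment type chosen below
	 * decides how the BO is DMA-mapped: SHARED reuses the original BO,
	 * USERPTR and SG wrap it in a DMA-mappable SG BO, and DMABUF imports
	 * it through a dma-buf export of the original.
	 */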
	for (i = 0; i <= is_aql; i++) {
		attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
		if (unlikely(!attachment[i])) {
			ret = -ENOMEM;
			goto unwind;
		}

		pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
			 va + bo_size, vm);

		if ((adev == bo_adev && !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) ||
		    (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && reuse_dmamap(adev, bo_adev)) ||
		    (mem->domain == AMDGPU_GEM_DOMAIN_GTT && reuse_dmamap(adev, bo_adev)) ||
		    same_hive) {
			/* Mappings on the local GPU, VRAM mappings in the
			 * local hive, and userptr or GTT mappings that can
			 * reuse the DMA-map address space all share the
			 * original BO
			 */
			attachment[i]->type = KFD_MEM_ATT_SHARED;
			bo[i] = mem->bo;
			drm_gem_object_get(&bo[i]->tbo.base);
		} else if (i > 0) {
			/* Multiple mappings on the same GPU share the BO */
			attachment[i]->type = KFD_MEM_ATT_SHARED;
			bo[i] = bo[0];
			drm_gem_object_get(&bo[i]->tbo.base);
		} else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
			/* Create an SG BO to DMA-map userptrs on other GPUs */
			attachment[i]->type = KFD_MEM_ATT_USERPTR;
			ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
			if (ret)
				goto unwind;
		/* Handle DOORBELL BOs of peer devices and MMIO BOs of local and peer devices */
		} else if (mem->bo->tbo.type == ttm_bo_type_sg) {
			WARN_ONCE(!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL ||
				    mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP),
				  "Handling invalid SG BO in ATTACH request");
			attachment[i]->type = KFD_MEM_ATT_SG;
			ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
			if (ret)
				goto unwind;
		/* Enable access to GTT and VRAM BOs of peer devices */
		} else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT ||
			   mem->domain == AMDGPU_GEM_DOMAIN_VRAM) {
			attachment[i]->type = KFD_MEM_ATT_DMABUF;
			ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
			if (ret)
				goto unwind;
			pr_debug("Employ DMABUF mechanism to enable peer GPU access\n");
		} else {
			WARN_ONCE(true, "Handling invalid ATTACH request");
			ret = -EINVAL;
			goto unwind;
		}

		/* Add BO to VM internal data structures */
		ret = amdgpu_bo_reserve(bo[i], false);
		if (ret) {
			pr_debug("Unable to reserve BO during memory attach");
			goto unwind;
		}
		bo_va = amdgpu_vm_bo_find(vm, bo[i]);
		if (!bo_va)
			bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
		else
			++bo_va->ref_count;
		attachment[i]->bo_va = bo_va;
		amdgpu_bo_unreserve(bo[i]);
		if (unlikely(!attachment[i]->bo_va)) {
			ret = -ENOMEM;
			pr_err("Failed to add BO object to VM. ret == %d\n",
			       ret);
			goto unwind;
		}
		attachment[i]->va = va;
		attachment[i]->pte_flags = get_pte_flags(adev, mem);
		attachment[i]->adev = adev;
		list_add(&attachment[i]->list, &mem->attachments);

		va += bo_size;
	}

	return 0;

unwind:
	for (; i >= 0; i--) {
		if (!attachment[i])
			continue;
		if (attachment[i]->bo_va) {
			(void)amdgpu_bo_reserve(bo[i], true);
			if (--attachment[i]->bo_va->ref_count == 0)
				amdgpu_vm_bo_del(adev, attachment[i]->bo_va);
			amdgpu_bo_unreserve(bo[i]);
			list_del(&attachment[i]->list);
		}
		if (bo[i])
			drm_gem_object_put(&bo[i]->tbo.base);
		kfree(attachment[i]);
	}
	return ret;
}
990a46a2cd1SFelix Kuehling
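/* kfd_mem_detach - undo a single kfd_mem_attach() attachment.
 *
 * Drops the attachment's bo_va reference (deleting the bo_va from the VM
 * once its refcount reaches zero), releases the GEM reference taken when
 * the attachment was created, unlinks it from mem->attachments and frees
 * the attachment structure.
 */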
991c780b2eeSFelix Kuehling static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
992a46a2cd1SFelix Kuehling {
9934e94272fSFelix Kuehling struct amdgpu_bo *bo = attachment->bo_va->base.bo;
9944e94272fSFelix Kuehling
995c780b2eeSFelix Kuehling pr_debug("\t remove VA 0x%llx in entry %p\n",
996c780b2eeSFelix Kuehling attachment->va, attachment);
997ffa88b00SXiaogang Chen if (--attachment->bo_va->ref_count == 0)
998e56694f7SChristian König amdgpu_vm_bo_del(attachment->adev, attachment->bo_va);
9994e94272fSFelix Kuehling drm_gem_object_put(&bo->tbo.base);
1000c780b2eeSFelix Kuehling list_del(&attachment->list);
1001c780b2eeSFelix Kuehling kfree(attachment);
1002a46a2cd1SFelix Kuehling }
1003a46a2cd1SFelix Kuehling
1004a46a2cd1SFelix Kuehling static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
10055ae0283eSFelix Kuehling struct amdkfd_process_info *process_info,
10065ae0283eSFelix Kuehling bool userptr)
1007a46a2cd1SFelix Kuehling {
1008a46a2cd1SFelix Kuehling mutex_lock(&process_info->lock);
10095ae0283eSFelix Kuehling if (userptr)
10108abc1eb2SChristian König list_add_tail(&mem->validate_list,
10118abc1eb2SChristian König &process_info->userptr_valid_list);
10125ae0283eSFelix Kuehling else
10138abc1eb2SChristian König list_add_tail(&mem->validate_list, &process_info->kfd_bo_list);
1014a46a2cd1SFelix Kuehling mutex_unlock(&process_info->lock);
1015a46a2cd1SFelix Kuehling }
1016a46a2cd1SFelix Kuehling
101771efab6aSOak Zeng static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
101871efab6aSOak Zeng struct amdkfd_process_info *process_info)
101971efab6aSOak Zeng {
102071efab6aSOak Zeng mutex_lock(&process_info->lock);
10218abc1eb2SChristian König list_del(&mem->validate_list);
102271efab6aSOak Zeng mutex_unlock(&process_info->lock);
102371efab6aSOak Zeng }
102471efab6aSOak Zeng
10255ae0283eSFelix Kuehling /* Initializes user pages. It registers the MMU notifier and validates
10265ae0283eSFelix Kuehling * the userptr BO in the GTT domain.
10275ae0283eSFelix Kuehling *
10285ae0283eSFelix Kuehling * The BO must already be on the userptr_valid_list. Otherwise an
10295ae0283eSFelix Kuehling * eviction and restore may happen that leaves the new BO unmapped
10305ae0283eSFelix Kuehling * with the user mode queues running.
10315ae0283eSFelix Kuehling *
10325ae0283eSFelix Kuehling * Takes the process_info->lock to protect against concurrent restore
10335ae0283eSFelix Kuehling * workers.
10345ae0283eSFelix Kuehling *
10355ae0283eSFelix Kuehling * Returns 0 for success, negative errno for errors.
10365ae0283eSFelix Kuehling */
1037011bbb03SRajneesh Bhardwaj static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
1038011bbb03SRajneesh Bhardwaj bool criu_resume)
10395ae0283eSFelix Kuehling {
10405ae0283eSFelix Kuehling struct amdkfd_process_info *process_info = mem->process_info;
10415ae0283eSFelix Kuehling struct amdgpu_bo *bo = mem->bo;
10425ae0283eSFelix Kuehling struct ttm_operation_ctx ctx = { true, false };
1043fec8fdb5SChristian König struct hmm_range *range;
10445ae0283eSFelix Kuehling int ret = 0;
10455ae0283eSFelix Kuehling
10465ae0283eSFelix Kuehling mutex_lock(&process_info->lock);
10475ae0283eSFelix Kuehling
104877f47d23SChristian König ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
10495ae0283eSFelix Kuehling if (ret) {
10505ae0283eSFelix Kuehling pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
10515ae0283eSFelix Kuehling goto out;
10525ae0283eSFelix Kuehling }
10535ae0283eSFelix Kuehling
1054d9483ecdSChristian König ret = amdgpu_hmm_register(bo, user_addr);
10555ae0283eSFelix Kuehling if (ret) {
10565ae0283eSFelix Kuehling pr_err("%s: Failed to register MMU notifier: %d\n",
10575ae0283eSFelix Kuehling __func__, ret);
10585ae0283eSFelix Kuehling goto out;
10595ae0283eSFelix Kuehling }
10605ae0283eSFelix Kuehling
1061011bbb03SRajneesh Bhardwaj if (criu_resume) {
1062011bbb03SRajneesh Bhardwaj /*
1063011bbb03SRajneesh Bhardwaj * During a CRIU restore operation, the userptr buffer objects
1064011bbb03SRajneesh Bhardwaj * will be validated in the restore_userptr_work worker at a
1065011bbb03SRajneesh Bhardwaj * later stage when it is scheduled by another ioctl called by
1066011bbb03SRajneesh Bhardwaj * CRIU master process for the target pid for restore.
1067011bbb03SRajneesh Bhardwaj */
1068f95f51a4SFelix Kuehling mutex_lock(&process_info->notifier_lock);
1069f95f51a4SFelix Kuehling mem->invalid++;
1070f95f51a4SFelix Kuehling mutex_unlock(&process_info->notifier_lock);
1071011bbb03SRajneesh Bhardwaj mutex_unlock(&process_info->lock);
1072011bbb03SRajneesh Bhardwaj return 0;
1073011bbb03SRajneesh Bhardwaj }
1074011bbb03SRajneesh Bhardwaj
1075fec8fdb5SChristian König ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
1076318c3f4bSAlex Deucher if (ret) {
10779095e554SPhilip Yang if (ret == -EAGAIN)
10789095e554SPhilip Yang pr_debug("Failed to get user pages, try again\n");
10799095e554SPhilip Yang else
1080318c3f4bSAlex Deucher pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
1081899fbde1SPhilip Yang goto unregister_out;
1082318c3f4bSAlex Deucher }
1083318c3f4bSAlex Deucher
10845ae0283eSFelix Kuehling ret = amdgpu_bo_reserve(bo, true);
10855ae0283eSFelix Kuehling if (ret) {
10865ae0283eSFelix Kuehling pr_err("%s: Failed to reserve BO\n", __func__);
10875ae0283eSFelix Kuehling goto release_out;
10885ae0283eSFelix Kuehling }
1089c704ab18SChristian König amdgpu_bo_placement_from_domain(bo, mem->domain);
10905ae0283eSFelix Kuehling ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
10915ae0283eSFelix Kuehling if (ret)
10925ae0283eSFelix Kuehling pr_err("%s: failed to validate BO\n", __func__);
10935ae0283eSFelix Kuehling amdgpu_bo_unreserve(bo);
10945ae0283eSFelix Kuehling
10955ae0283eSFelix Kuehling release_out:
1096fec8fdb5SChristian König amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
10975ae0283eSFelix Kuehling unregister_out:
10985ae0283eSFelix Kuehling if (ret)
1099d9483ecdSChristian König amdgpu_hmm_unregister(bo);
11005ae0283eSFelix Kuehling out:
11015ae0283eSFelix Kuehling mutex_unlock(&process_info->lock);
11025ae0283eSFelix Kuehling return ret;
11035ae0283eSFelix Kuehling }
11045ae0283eSFelix Kuehling
1105a46a2cd1SFelix Kuehling /* Reserving a BO and its page table BOs must happen atomically to
1106a46a2cd1SFelix Kuehling * avoid deadlocks. Some operations update multiple VMs at once. Track
1107a46a2cd1SFelix Kuehling * all the reservation info in a context structure. Optionally a sync
1108a46a2cd1SFelix Kuehling * object can track VM updates.
1109a46a2cd1SFelix Kuehling */
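/*
 * Typical caller pattern (sketch only; the real callers below vary in
 * which reserve variant they use and whether they wait on the sync
 * object):
 *
 *	struct bo_vm_reservation_context ctx;
 *	int ret;
 *
 *	ret = reserve_bo_and_vm(mem, avm, &ctx);
 *	if (ret)
 *		return ret;
 *	... map or unmap, accumulating fences in ctx.sync ...
 *	ret = unreserve_bo_and_vms(&ctx, true, false);
 */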
1110a46a2cd1SFelix Kuehling struct bo_vm_reservation_context {
11118abc1eb2SChristian König /* DRM execution context for the reservation */
11128abc1eb2SChristian König struct drm_exec exec;
11138abc1eb2SChristian König /* Number of VMs reserved */
11148abc1eb2SChristian König unsigned int n_vms;
11158abc1eb2SChristian König /* Pointer to sync object */
11168abc1eb2SChristian König struct amdgpu_sync *sync;
1117a46a2cd1SFelix Kuehling };
1118a46a2cd1SFelix Kuehling
1119a46a2cd1SFelix Kuehling enum bo_vm_match {
1120a46a2cd1SFelix Kuehling BO_VM_NOT_MAPPED = 0, /* Match VMs where a BO is not mapped */
1121a46a2cd1SFelix Kuehling BO_VM_MAPPED, /* Match VMs where a BO is mapped */
1122a46a2cd1SFelix Kuehling BO_VM_ALL, /* Match all VMs a BO was added to */
1123a46a2cd1SFelix Kuehling };
1124a46a2cd1SFelix Kuehling
1125a46a2cd1SFelix Kuehling /**
1126a46a2cd1SFelix Kuehling * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
1127a46a2cd1SFelix Kuehling * @mem: KFD BO structure.
1128a46a2cd1SFelix Kuehling * @vm: the VM to reserve.
1129a46a2cd1SFelix Kuehling * @ctx: the struct that will be used in unreserve_bo_and_vms().
1130a46a2cd1SFelix Kuehling */
1131a46a2cd1SFelix Kuehling static int reserve_bo_and_vm(struct kgd_mem *mem,
1132a46a2cd1SFelix Kuehling struct amdgpu_vm *vm,
1133a46a2cd1SFelix Kuehling struct bo_vm_reservation_context *ctx)
1134a46a2cd1SFelix Kuehling {
1135a46a2cd1SFelix Kuehling struct amdgpu_bo *bo = mem->bo;
1136a46a2cd1SFelix Kuehling int ret;
1137a46a2cd1SFelix Kuehling
1138a46a2cd1SFelix Kuehling WARN_ON(!vm);
1139a46a2cd1SFelix Kuehling
1140a46a2cd1SFelix Kuehling ctx->n_vms = 1;
1141a46a2cd1SFelix Kuehling ctx->sync = &mem->sync;
114205d24935SRob Clark drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
11438abc1eb2SChristian König drm_exec_until_all_locked(&ctx->exec) {
11448abc1eb2SChristian König ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
11458abc1eb2SChristian König drm_exec_retry_on_contention(&ctx->exec);
11468abc1eb2SChristian König if (unlikely(ret))
11478abc1eb2SChristian König goto error;
1148a46a2cd1SFelix Kuehling
11494984fc57SChristian König ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
11508abc1eb2SChristian König drm_exec_retry_on_contention(&ctx->exec);
11518abc1eb2SChristian König if (unlikely(ret))
11528abc1eb2SChristian König goto error;
1153a46a2cd1SFelix Kuehling }
115410f39758SBernard Zhao return 0;
11558abc1eb2SChristian König
11568abc1eb2SChristian König error:
11578abc1eb2SChristian König pr_err("Failed to reserve buffers in ttm.\n");
11588abc1eb2SChristian König drm_exec_fini(&ctx->exec);
11598abc1eb2SChristian König return ret;
1160a46a2cd1SFelix Kuehling }
1161a46a2cd1SFelix Kuehling
1162a46a2cd1SFelix Kuehling /**
1163a46a2cd1SFelix Kuehling * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
1164a46a2cd1SFelix Kuehling * @mem: KFD BO structure.
1165a46a2cd1SFelix Kuehling * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
1166a46a2cd1SFelix Kuehling * are used. Otherwise, only the single given VM is used.
1167a46a2cd1SFelix Kuehling * @map_type: the mapping status that will be used to filter the VMs.
1168a46a2cd1SFelix Kuehling * @ctx: the struct that will be used in unreserve_bo_and_vms().
1169a46a2cd1SFelix Kuehling *
1170a46a2cd1SFelix Kuehling * Returns 0 for success, negative for failure.
1171a46a2cd1SFelix Kuehling */
1172a46a2cd1SFelix Kuehling static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
1173a46a2cd1SFelix Kuehling struct amdgpu_vm *vm, enum bo_vm_match map_type,
1174a46a2cd1SFelix Kuehling struct bo_vm_reservation_context *ctx)
1175a46a2cd1SFelix Kuehling {
1176c780b2eeSFelix Kuehling struct kfd_mem_attachment *entry;
11778abc1eb2SChristian König struct amdgpu_bo *bo = mem->bo;
1178a46a2cd1SFelix Kuehling int ret;
1179a46a2cd1SFelix Kuehling
1180a46a2cd1SFelix Kuehling ctx->sync = &mem->sync;
11812d6f49eeSLang Yu drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
11822d6f49eeSLang Yu DRM_EXEC_IGNORE_DUPLICATES, 0);
11838abc1eb2SChristian König drm_exec_until_all_locked(&ctx->exec) {
11848abc1eb2SChristian König ctx->n_vms = 0;
1185c780b2eeSFelix Kuehling list_for_each_entry(entry, &mem->attachments, list) {
1186a46a2cd1SFelix Kuehling if ((vm && vm != entry->bo_va->base.vm) ||
1187a46a2cd1SFelix Kuehling (entry->is_mapped != map_type
1188a46a2cd1SFelix Kuehling && map_type != BO_VM_ALL))
1189a46a2cd1SFelix Kuehling continue;
1190a46a2cd1SFelix Kuehling
11918abc1eb2SChristian König ret = amdgpu_vm_lock_pd(entry->bo_va->base.vm,
11928abc1eb2SChristian König &ctx->exec, 2);
11938abc1eb2SChristian König drm_exec_retry_on_contention(&ctx->exec);
11948abc1eb2SChristian König if (unlikely(ret))
11958abc1eb2SChristian König goto error;
11968abc1eb2SChristian König ++ctx->n_vms;
1197a46a2cd1SFelix Kuehling }
1198a46a2cd1SFelix Kuehling
11998abc1eb2SChristian König ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
12008abc1eb2SChristian König drm_exec_retry_on_contention(&ctx->exec);
12018abc1eb2SChristian König if (unlikely(ret))
12028abc1eb2SChristian König goto error;
1203a46a2cd1SFelix Kuehling }
120410f39758SBernard Zhao return 0;
12058abc1eb2SChristian König
12068abc1eb2SChristian König error:
12078abc1eb2SChristian König pr_err("Failed to reserve buffers in ttm.\n");
12088abc1eb2SChristian König drm_exec_fini(&ctx->exec);
12098abc1eb2SChristian König return ret;
1210a46a2cd1SFelix Kuehling }
1211a46a2cd1SFelix Kuehling
1212a46a2cd1SFelix Kuehling /**
1213a46a2cd1SFelix Kuehling * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
1214a46a2cd1SFelix Kuehling * @ctx: Reservation context to unreserve
1215a46a2cd1SFelix Kuehling * @wait: Optionally wait for a sync object representing pending VM updates
1216a46a2cd1SFelix Kuehling * @intr: Whether the wait is interruptible
1217a46a2cd1SFelix Kuehling *
1218a46a2cd1SFelix Kuehling * Also frees any resources allocated in
1219a46a2cd1SFelix Kuehling * reserve_bo_and_(cond_)vm(s). Returns the status from
1220a46a2cd1SFelix Kuehling * amdgpu_sync_wait.
1221a46a2cd1SFelix Kuehling */
1222a46a2cd1SFelix Kuehling static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
1223a46a2cd1SFelix Kuehling bool wait, bool intr)
1224a46a2cd1SFelix Kuehling {
1225a46a2cd1SFelix Kuehling int ret = 0;
1226a46a2cd1SFelix Kuehling
1227a46a2cd1SFelix Kuehling if (wait)
1228a46a2cd1SFelix Kuehling ret = amdgpu_sync_wait(ctx->sync, intr);
1229a46a2cd1SFelix Kuehling
12308abc1eb2SChristian König drm_exec_fini(&ctx->exec);
1231a46a2cd1SFelix Kuehling ctx->sync = NULL;
1232a46a2cd1SFelix Kuehling return ret;
1233a46a2cd1SFelix Kuehling }
1234a46a2cd1SFelix Kuehling
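/* Remove the attachment's VA mapping from the GPUVM.
 *
 * Fails with -EBUSY while a user-mode queue still holds a reference on
 * the bo_va. Otherwise the VA mapping is removed, freed mappings are
 * cleared from the page tables, and the resulting page-table update
 * fence is added to @sync.
 */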
1235834368eaSPhilip Yang static int unmap_bo_from_gpuvm(struct kgd_mem *mem,
1236c780b2eeSFelix Kuehling struct kfd_mem_attachment *entry,
1237a46a2cd1SFelix Kuehling struct amdgpu_sync *sync)
1238a46a2cd1SFelix Kuehling {
1239a46a2cd1SFelix Kuehling struct amdgpu_bo_va *bo_va = entry->bo_va;
1240b72ed8a2SFelix Kuehling struct amdgpu_device *adev = entry->adev;
1241a46a2cd1SFelix Kuehling struct amdgpu_vm *vm = bo_va->base.vm;
1242a46a2cd1SFelix Kuehling
1243834368eaSPhilip Yang if (bo_va->queue_refcount) {
1244834368eaSPhilip Yang pr_debug("bo_va->queue_refcount %d\n", bo_va->queue_refcount);
1245834368eaSPhilip Yang return -EBUSY;
1246834368eaSPhilip Yang }
1247834368eaSPhilip Yang
1248357ef5b3SAndrew Martin (void)amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
1249a46a2cd1SFelix Kuehling
1250357ef5b3SAndrew Martin (void)amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
1251a46a2cd1SFelix Kuehling
1252*16590745SChristian König (void)amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL);
1253834368eaSPhilip Yang
1254834368eaSPhilip Yang return 0;
1255a46a2cd1SFelix Kuehling }
1256a46a2cd1SFelix Kuehling
1257b72ed8a2SFelix Kuehling static int update_gpuvm_pte(struct kgd_mem *mem,
1258c780b2eeSFelix Kuehling struct kfd_mem_attachment *entry,
12594d30a83cSChristian König struct amdgpu_sync *sync)
1260a46a2cd1SFelix Kuehling {
12611e608013SYueHaibing struct amdgpu_bo_va *bo_va = entry->bo_va;
1262b72ed8a2SFelix Kuehling struct amdgpu_device *adev = entry->adev;
1263b72ed8a2SFelix Kuehling int ret;
1264b72ed8a2SFelix Kuehling
1265b72ed8a2SFelix Kuehling ret = kfd_mem_dmamap_attachment(mem, entry);
1266b72ed8a2SFelix Kuehling if (ret)
1267b72ed8a2SFelix Kuehling return ret;
1268a46a2cd1SFelix Kuehling
1269a46a2cd1SFelix Kuehling /* Update the page tables */
12708f8cc3fbSChristian König ret = amdgpu_vm_bo_update(adev, bo_va, false);
1271a46a2cd1SFelix Kuehling if (ret) {
1272a46a2cd1SFelix Kuehling pr_err("amdgpu_vm_bo_update failed\n");
1273a46a2cd1SFelix Kuehling return ret;
1274a46a2cd1SFelix Kuehling }
1275a46a2cd1SFelix Kuehling
1276*16590745SChristian König return amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL);
1277a46a2cd1SFelix Kuehling }
1278a46a2cd1SFelix Kuehling
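/* Map the attachment's VA range in the GPUVM.
 *
 * Creates the VA mapping and, unless @no_update_pte is set (e.g. for
 * userptr BOs that are not validated yet), updates the page tables and
 * accumulates the update fence in @sync. If the page-table update fails,
 * the new mapping is rolled back again.
 */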
1279b72ed8a2SFelix Kuehling static int map_bo_to_gpuvm(struct kgd_mem *mem,
1280b72ed8a2SFelix Kuehling struct kfd_mem_attachment *entry,
1281b72ed8a2SFelix Kuehling struct amdgpu_sync *sync,
12824d30a83cSChristian König bool no_update_pte)
1283a46a2cd1SFelix Kuehling {
1284a46a2cd1SFelix Kuehling int ret;
1285a46a2cd1SFelix Kuehling
1286a46a2cd1SFelix Kuehling /* Set virtual address for the allocation */
1287b72ed8a2SFelix Kuehling ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
1288a46a2cd1SFelix Kuehling amdgpu_bo_size(entry->bo_va->base.bo),
1289a46a2cd1SFelix Kuehling entry->pte_flags);
1290a46a2cd1SFelix Kuehling if (ret) {
1291a46a2cd1SFelix Kuehling pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
1292a46a2cd1SFelix Kuehling entry->va, ret);
1293a46a2cd1SFelix Kuehling return ret;
1294a46a2cd1SFelix Kuehling }
1295a46a2cd1SFelix Kuehling
12965ae0283eSFelix Kuehling if (no_update_pte)
12975ae0283eSFelix Kuehling return 0;
12985ae0283eSFelix Kuehling
12994d30a83cSChristian König ret = update_gpuvm_pte(mem, entry, sync);
1300a46a2cd1SFelix Kuehling if (ret) {
1301a46a2cd1SFelix Kuehling pr_err("update_gpuvm_pte() failed\n");
1302a46a2cd1SFelix Kuehling goto update_gpuvm_pte_failed;
1303a46a2cd1SFelix Kuehling }
1304a46a2cd1SFelix Kuehling
1305a46a2cd1SFelix Kuehling return 0;
1306a46a2cd1SFelix Kuehling
1307a46a2cd1SFelix Kuehling update_gpuvm_pte_failed:
1308b72ed8a2SFelix Kuehling unmap_bo_from_gpuvm(mem, entry, sync);
1309101b8104SPhilip Yang kfd_mem_dmaunmap_attachment(mem, entry);
1310a46a2cd1SFelix Kuehling return ret;
1311a46a2cd1SFelix Kuehling }
1312a46a2cd1SFelix Kuehling
131350661eb1SFelix Kuehling static int process_validate_vms(struct amdkfd_process_info *process_info,
131450661eb1SFelix Kuehling struct ww_acquire_ctx *ticket)
1315a46a2cd1SFelix Kuehling {
13165b21d3e5SFelix Kuehling struct amdgpu_vm *peer_vm;
1317a46a2cd1SFelix Kuehling int ret;
1318a46a2cd1SFelix Kuehling
1319a46a2cd1SFelix Kuehling list_for_each_entry(peer_vm, &process_info->vm_list_head,
1320a46a2cd1SFelix Kuehling vm_list_node) {
132150661eb1SFelix Kuehling ret = vm_validate_pt_pd_bos(peer_vm, ticket);
1322a46a2cd1SFelix Kuehling if (ret)
1323a46a2cd1SFelix Kuehling return ret;
1324a46a2cd1SFelix Kuehling }
1325a46a2cd1SFelix Kuehling
1326a46a2cd1SFelix Kuehling return 0;
1327a46a2cd1SFelix Kuehling }
1328a46a2cd1SFelix Kuehling
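/* Add the fences on every peer VM's page-directory reservation object to
 * @sync. The AMDGPU_SYNC_NE_OWNER / AMDGPU_FENCE_OWNER_KFD filter skips
 * fences owned by KFD itself (such as its eviction fences).
 */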
13299130cc01SHarish Kasiviswanathan static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
13309130cc01SHarish Kasiviswanathan struct amdgpu_sync *sync)
13319130cc01SHarish Kasiviswanathan {
13329130cc01SHarish Kasiviswanathan struct amdgpu_vm *peer_vm;
13339130cc01SHarish Kasiviswanathan int ret;
13349130cc01SHarish Kasiviswanathan
13359130cc01SHarish Kasiviswanathan list_for_each_entry(peer_vm, &process_info->vm_list_head,
13369130cc01SHarish Kasiviswanathan vm_list_node) {
1337391629bdSNirmoy Das struct amdgpu_bo *pd = peer_vm->root.bo;
13389130cc01SHarish Kasiviswanathan
13395d319660SChristian König ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
13405d319660SChristian König AMDGPU_SYNC_NE_OWNER,
13415d319660SChristian König AMDGPU_FENCE_OWNER_KFD);
13429130cc01SHarish Kasiviswanathan if (ret)
13439130cc01SHarish Kasiviswanathan return ret;
13449130cc01SHarish Kasiviswanathan }
13459130cc01SHarish Kasiviswanathan
13469130cc01SHarish Kasiviswanathan return 0;
13479130cc01SHarish Kasiviswanathan }
13489130cc01SHarish Kasiviswanathan
1349a46a2cd1SFelix Kuehling static int process_update_pds(struct amdkfd_process_info *process_info,
1350a46a2cd1SFelix Kuehling struct amdgpu_sync *sync)
1351a46a2cd1SFelix Kuehling {
13525b21d3e5SFelix Kuehling struct amdgpu_vm *peer_vm;
1353a46a2cd1SFelix Kuehling int ret;
1354a46a2cd1SFelix Kuehling
1355a46a2cd1SFelix Kuehling list_for_each_entry(peer_vm, &process_info->vm_list_head,
1356a46a2cd1SFelix Kuehling vm_list_node) {
13575b21d3e5SFelix Kuehling ret = vm_update_pds(peer_vm, sync);
1358a46a2cd1SFelix Kuehling if (ret)
1359a46a2cd1SFelix Kuehling return ret;
1360a46a2cd1SFelix Kuehling }
1361a46a2cd1SFelix Kuehling
1362a46a2cd1SFelix Kuehling return 0;
1363a46a2cd1SFelix Kuehling }
1364a46a2cd1SFelix Kuehling
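/* Initialize the KFD part of a compute VM.
 *
 * The first VM of a process also allocates the shared amdkfd_process_info:
 * the eviction fence, the userptr lists and the delayed userptr restore
 * worker. For every VM the page directory is validated, waited on and
 * fenced with the eviction fence before the VM is added to the process'
 * VM list.
 */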
1365ede0dd86SFelix Kuehling static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
1366a46a2cd1SFelix Kuehling struct dma_fence **ef)
1367a46a2cd1SFelix Kuehling {
13683486625bSFelix Kuehling struct amdkfd_process_info *info = NULL;
1369ede0dd86SFelix Kuehling int ret;
1370a46a2cd1SFelix Kuehling
1371a46a2cd1SFelix Kuehling if (!*process_info) {
1372a46a2cd1SFelix Kuehling info = kzalloc(sizeof(*info), GFP_KERNEL);
1373ede0dd86SFelix Kuehling if (!info)
1374ede0dd86SFelix Kuehling return -ENOMEM;
1375a46a2cd1SFelix Kuehling
1376a46a2cd1SFelix Kuehling mutex_init(&info->lock);
1377f95f51a4SFelix Kuehling mutex_init(&info->notifier_lock);
1378a46a2cd1SFelix Kuehling INIT_LIST_HEAD(&info->vm_list_head);
1379a46a2cd1SFelix Kuehling INIT_LIST_HEAD(&info->kfd_bo_list);
13805ae0283eSFelix Kuehling INIT_LIST_HEAD(&info->userptr_valid_list);
13815ae0283eSFelix Kuehling INIT_LIST_HEAD(&info->userptr_inval_list);
1382a46a2cd1SFelix Kuehling
1383a46a2cd1SFelix Kuehling info->eviction_fence =
1384a46a2cd1SFelix Kuehling amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
1385eb2cec55SAlex Sierra current->mm,
1386eb2cec55SAlex Sierra NULL);
1387a46a2cd1SFelix Kuehling if (!info->eviction_fence) {
1388a46a2cd1SFelix Kuehling pr_err("Failed to create eviction fence\n");
1389ede0dd86SFelix Kuehling ret = -ENOMEM;
1390a46a2cd1SFelix Kuehling goto create_evict_fence_fail;
1391a46a2cd1SFelix Kuehling }
1392a46a2cd1SFelix Kuehling
13935ae0283eSFelix Kuehling info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
13945ae0283eSFelix Kuehling INIT_DELAYED_WORK(&info->restore_userptr_work,
13955ae0283eSFelix Kuehling amdgpu_amdkfd_restore_userptr_worker);
13965ae0283eSFelix Kuehling
1397a46a2cd1SFelix Kuehling *process_info = info;
1398a46a2cd1SFelix Kuehling }
1399a46a2cd1SFelix Kuehling
1400ede0dd86SFelix Kuehling vm->process_info = *process_info;
1401a46a2cd1SFelix Kuehling
14023486625bSFelix Kuehling /* Validate page directory and attach eviction fence */
1403391629bdSNirmoy Das ret = amdgpu_bo_reserve(vm->root.bo, true);
14043486625bSFelix Kuehling if (ret)
14053486625bSFelix Kuehling goto reserve_pd_fail;
140650661eb1SFelix Kuehling ret = vm_validate_pt_pd_bos(vm, NULL);
14073486625bSFelix Kuehling if (ret) {
14083486625bSFelix Kuehling pr_err("validate_pt_pd_bos() failed\n");
14093486625bSFelix Kuehling goto validate_pd_fail;
14103486625bSFelix Kuehling }
1411391629bdSNirmoy Das ret = amdgpu_bo_sync_wait(vm->root.bo,
1412d38ca8f0SColin Ian King AMDGPU_FENCE_OWNER_KFD, false);
14133486625bSFelix Kuehling if (ret)
14143486625bSFelix Kuehling goto wait_pd_fail;
1415c8d4c18bSChristian König ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1);
1416dd68722cSFelix Kuehling if (ret)
1417dd68722cSFelix Kuehling goto reserve_shared_fail;
141842470840SChristian König dma_resv_add_fence(vm->root.bo->tbo.base.resv,
141942470840SChristian König &vm->process_info->eviction_fence->base,
142042470840SChristian König DMA_RESV_USAGE_BOOKKEEP);
1421391629bdSNirmoy Das amdgpu_bo_unreserve(vm->root.bo);
14223486625bSFelix Kuehling
14233486625bSFelix Kuehling /* Update process info */
1424ede0dd86SFelix Kuehling mutex_lock(&vm->process_info->lock);
1425ede0dd86SFelix Kuehling list_add_tail(&vm->vm_list_node,
1426ede0dd86SFelix Kuehling &(vm->process_info->vm_list_head));
1427ede0dd86SFelix Kuehling vm->process_info->n_vms++;
14285fa43628SLang Yu if (ef)
14299a1c1339SFelix Kuehling *ef = dma_fence_get(&vm->process_info->eviction_fence->base);
1430ede0dd86SFelix Kuehling mutex_unlock(&vm->process_info->lock);
1431a46a2cd1SFelix Kuehling
1432ede0dd86SFelix Kuehling return 0;
1433a46a2cd1SFelix Kuehling
1434dd68722cSFelix Kuehling reserve_shared_fail:
14353486625bSFelix Kuehling wait_pd_fail:
14363486625bSFelix Kuehling validate_pd_fail:
1437391629bdSNirmoy Das amdgpu_bo_unreserve(vm->root.bo);
14383486625bSFelix Kuehling reserve_pd_fail:
1439ede0dd86SFelix Kuehling vm->process_info = NULL;
1440ede0dd86SFelix Kuehling if (info) {
1441ede0dd86SFelix Kuehling dma_fence_put(&info->eviction_fence->base);
1442ede0dd86SFelix Kuehling *process_info = NULL;
14435ae0283eSFelix Kuehling put_pid(info->pid);
1444a46a2cd1SFelix Kuehling create_evict_fence_fail:
1445a46a2cd1SFelix Kuehling mutex_destroy(&info->lock);
1446f95f51a4SFelix Kuehling mutex_destroy(&info->notifier_lock);
1447a46a2cd1SFelix Kuehling kfree(info);
1448ede0dd86SFelix Kuehling }
1449a46a2cd1SFelix Kuehling return ret;
1450a46a2cd1SFelix Kuehling }
1451a46a2cd1SFelix Kuehling
1452d25e35bcSRamesh Errabolu /**
1453d25e35bcSRamesh Errabolu * amdgpu_amdkfd_gpuvm_pin_bo() - Pins a BO using the following criteria
1454d25e35bcSRamesh Errabolu * @bo: Handle of buffer object being pinned
1455d25e35bcSRamesh Errabolu * @domain: Domain into which BO should be pinned
1456d25e35bcSRamesh Errabolu *
1457d25e35bcSRamesh Errabolu * - USERPTR BOs are UNPINNABLE and will return an error
1458d25e35bcSRamesh Errabolu * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
1459d25e35bcSRamesh Errabolu * PIN count incremented. It is valid to PIN a BO multiple times
1460d25e35bcSRamesh Errabolu *
1461d25e35bcSRamesh Errabolu * Return: 0 if the BO was pinned successfully, non-zero in case of error.
1462d25e35bcSRamesh Errabolu */
1463d25e35bcSRamesh Errabolu static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain)
1464d25e35bcSRamesh Errabolu {
1465d25e35bcSRamesh Errabolu int ret = 0;
1466d25e35bcSRamesh Errabolu
1467d25e35bcSRamesh Errabolu ret = amdgpu_bo_reserve(bo, false);
1468d25e35bcSRamesh Errabolu if (unlikely(ret))
1469d25e35bcSRamesh Errabolu return ret;
1470d25e35bcSRamesh Errabolu
1471d53ce023SPhilip Yang if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) {
1472d53ce023SPhilip Yang /*
1473d53ce023SPhilip Yang * If bo is not contiguous on VRAM, move to system memory first to ensure
1474d53ce023SPhilip Yang * we can get contiguous VRAM space after evicting other BOs.
1475d53ce023SPhilip Yang */
1476d53ce023SPhilip Yang if (!(bo->tbo.resource->placement & TTM_PL_FLAG_CONTIGUOUS)) {
1477d53ce023SPhilip Yang struct ttm_operation_ctx ctx = { true, false };
1478d53ce023SPhilip Yang
1479d53ce023SPhilip Yang amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
1480d53ce023SPhilip Yang ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1481d53ce023SPhilip Yang if (unlikely(ret)) {
1482d53ce023SPhilip Yang pr_debug("validate bo 0x%p to GTT failed %d\n", &bo->tbo, ret);
1483d53ce023SPhilip Yang goto out;
1484d53ce023SPhilip Yang }
1485d53ce023SPhilip Yang }
1486d53ce023SPhilip Yang }
1487d53ce023SPhilip Yang
1488f2be7b39SChristian König ret = amdgpu_bo_pin(bo, domain);
1489d25e35bcSRamesh Errabolu if (ret)
1490d25e35bcSRamesh Errabolu pr_err("Error in Pinning BO to domain: %d\n", domain);
1491d25e35bcSRamesh Errabolu
1492d25e35bcSRamesh Errabolu amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
1493d53ce023SPhilip Yang out:
1494d25e35bcSRamesh Errabolu amdgpu_bo_unreserve(bo);
1495d25e35bcSRamesh Errabolu return ret;
1496d25e35bcSRamesh Errabolu }
1497d25e35bcSRamesh Errabolu
1498d25e35bcSRamesh Errabolu /**
1499d25e35bcSRamesh Errabolu * amdgpu_amdkfd_gpuvm_unpin_bo() - Unpins a BO using the following criteria
1500d25e35bcSRamesh Errabolu * @bo: Handle of buffer object being unpinned
1501d25e35bcSRamesh Errabolu *
1502d25e35bcSRamesh Errabolu * - Is an illegal request for USERPTR BOs and is ignored
1503d25e35bcSRamesh Errabolu * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
1504d25e35bcSRamesh Errabolu * PIN count decremented. Calls to UNPIN must balance calls to PIN
1505d25e35bcSRamesh Errabolu */
150637ba5bbcSRamesh Errabolu static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
1507d25e35bcSRamesh Errabolu {
1508d25e35bcSRamesh Errabolu int ret = 0;
1509d25e35bcSRamesh Errabolu
1510d25e35bcSRamesh Errabolu ret = amdgpu_bo_reserve(bo, false);
1511d25e35bcSRamesh Errabolu if (unlikely(ret))
1512d25e35bcSRamesh Errabolu return;
1513d25e35bcSRamesh Errabolu
1514d25e35bcSRamesh Errabolu amdgpu_bo_unpin(bo);
1515d25e35bcSRamesh Errabolu amdgpu_bo_unreserve(bo);
1516d25e35bcSRamesh Errabolu }
1517d25e35bcSRamesh Errabolu
1518dff63da9SGraham Sider int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
151923b02b0eSPhilip Yang struct amdgpu_vm *avm,
1520b40a6ab2SFelix Kuehling void **process_info,
1521ede0dd86SFelix Kuehling struct dma_fence **ef)
1522ede0dd86SFelix Kuehling {
1523ede0dd86SFelix Kuehling int ret;
1524ede0dd86SFelix Kuehling
1525ede0dd86SFelix Kuehling /* Already a compute VM? */
1526ede0dd86SFelix Kuehling if (avm->process_info)
1527ede0dd86SFelix Kuehling return -EINVAL;
1528ede0dd86SFelix Kuehling
1529ede0dd86SFelix Kuehling /* Convert VM into a compute VM */
153088f7f881SNirmoy Das ret = amdgpu_vm_make_compute(adev, avm);
1531ede0dd86SFelix Kuehling if (ret)
1532ede0dd86SFelix Kuehling return ret;
1533ede0dd86SFelix Kuehling
1534ede0dd86SFelix Kuehling /* Initialize KFD part of the VM and process info */
1535ede0dd86SFelix Kuehling ret = init_kfd_vm(avm, process_info, ef);
1536ede0dd86SFelix Kuehling if (ret)
1537ede0dd86SFelix Kuehling return ret;
1538ede0dd86SFelix Kuehling
1539b40a6ab2SFelix Kuehling amdgpu_vm_set_task_info(avm);
1540ede0dd86SFelix Kuehling
1541ede0dd86SFelix Kuehling return 0;
1542ede0dd86SFelix Kuehling }
1543ede0dd86SFelix Kuehling
1544ede0dd86SFelix Kuehling void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1545ede0dd86SFelix Kuehling struct amdgpu_vm *vm)
1546ede0dd86SFelix Kuehling {
1547ede0dd86SFelix Kuehling struct amdkfd_process_info *process_info = vm->process_info;
1548ede0dd86SFelix Kuehling
1549ede0dd86SFelix Kuehling if (!process_info)
1550a46a2cd1SFelix Kuehling return;
1551a46a2cd1SFelix Kuehling
1552ede0dd86SFelix Kuehling /* Update process info */
1553a46a2cd1SFelix Kuehling mutex_lock(&process_info->lock);
1554a46a2cd1SFelix Kuehling process_info->n_vms--;
1555ede0dd86SFelix Kuehling list_del(&vm->vm_list_node);
1556a46a2cd1SFelix Kuehling mutex_unlock(&process_info->lock);
1557a46a2cd1SFelix Kuehling
1558f4a3c42bSxinhui pan vm->process_info = NULL;
1559f4a3c42bSxinhui pan
1560ede0dd86SFelix Kuehling /* Release per-process resources when last compute VM is destroyed */
1561a46a2cd1SFelix Kuehling if (!process_info->n_vms) {
1562a46a2cd1SFelix Kuehling WARN_ON(!list_empty(&process_info->kfd_bo_list));
15635ae0283eSFelix Kuehling WARN_ON(!list_empty(&process_info->userptr_valid_list));
15645ae0283eSFelix Kuehling WARN_ON(!list_empty(&process_info->userptr_inval_list));
1565a46a2cd1SFelix Kuehling
1566a46a2cd1SFelix Kuehling dma_fence_put(&process_info->eviction_fence->base);
15675ae0283eSFelix Kuehling cancel_delayed_work_sync(&process_info->restore_userptr_work);
15685ae0283eSFelix Kuehling put_pid(process_info->pid);
1569a46a2cd1SFelix Kuehling mutex_destroy(&process_info->lock);
1570f95f51a4SFelix Kuehling mutex_destroy(&process_info->notifier_lock);
1571a46a2cd1SFelix Kuehling kfree(process_info);
1572a46a2cd1SFelix Kuehling }
1573ede0dd86SFelix Kuehling }
1574ede0dd86SFelix Kuehling
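/* Return the process page-directory base for this compute VM. Pre-Vega10
 * ASICs get the address as a GPU page frame number, newer ASICs get the
 * full physical address.
 */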
1575b40a6ab2SFelix Kuehling uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
1576a46a2cd1SFelix Kuehling {
1577b40a6ab2SFelix Kuehling struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1578391629bdSNirmoy Das struct amdgpu_bo *pd = avm->root.bo;
1579e715c6d0SShaoyun Liu struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1580a46a2cd1SFelix Kuehling
1581e715c6d0SShaoyun Liu if (adev->asic_type < CHIP_VEGA10)
1582a46a2cd1SFelix Kuehling return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1583e715c6d0SShaoyun Liu return avm->pd_phys_addr;
1584a46a2cd1SFelix Kuehling }
1585a46a2cd1SFelix Kuehling
1586011bbb03SRajneesh Bhardwaj void amdgpu_amdkfd_block_mmu_notifications(void *p)
1587011bbb03SRajneesh Bhardwaj {
1588011bbb03SRajneesh Bhardwaj struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;
1589011bbb03SRajneesh Bhardwaj
1590011bbb03SRajneesh Bhardwaj mutex_lock(&pinfo->lock);
1591011bbb03SRajneesh Bhardwaj WRITE_ONCE(pinfo->block_mmu_notifications, true);
1592011bbb03SRajneesh Bhardwaj mutex_unlock(&pinfo->lock);
1593011bbb03SRajneesh Bhardwaj }
1594011bbb03SRajneesh Bhardwaj
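/* Finish a CRIU restore: bump the eviction counter, unblock MMU
 * notifications and schedule the userptr restore worker right away so the
 * userptr BOs deferred in init_user_pages() get validated and mapped.
 * Fails if MMU notifications were never blocked for this process.
 */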
1595011bbb03SRajneesh Bhardwaj int amdgpu_amdkfd_criu_resume(void *p)
1596011bbb03SRajneesh Bhardwaj {
1597011bbb03SRajneesh Bhardwaj int ret = 0;
1598011bbb03SRajneesh Bhardwaj struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;
1599011bbb03SRajneesh Bhardwaj
1600011bbb03SRajneesh Bhardwaj mutex_lock(&pinfo->lock);
1601011bbb03SRajneesh Bhardwaj pr_debug("scheduling work\n");
1602f95f51a4SFelix Kuehling mutex_lock(&pinfo->notifier_lock);
1603f95f51a4SFelix Kuehling pinfo->evicted_bos++;
1604f95f51a4SFelix Kuehling mutex_unlock(&pinfo->notifier_lock);
1605011bbb03SRajneesh Bhardwaj if (!READ_ONCE(pinfo->block_mmu_notifications)) {
1606011bbb03SRajneesh Bhardwaj ret = -EINVAL;
1607011bbb03SRajneesh Bhardwaj goto out_unlock;
1608011bbb03SRajneesh Bhardwaj }
1609011bbb03SRajneesh Bhardwaj WRITE_ONCE(pinfo->block_mmu_notifications, false);
16109a1c1339SFelix Kuehling queue_delayed_work(system_freezable_wq,
16119a1c1339SFelix Kuehling &pinfo->restore_userptr_work, 0);
1612011bbb03SRajneesh Bhardwaj
1613011bbb03SRajneesh Bhardwaj out_unlock:
1614011bbb03SRajneesh Bhardwaj mutex_unlock(&pinfo->lock);
1615011bbb03SRajneesh Bhardwaj return ret;
1616011bbb03SRajneesh Bhardwaj }
1617011bbb03SRajneesh Bhardwaj
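/* Estimate how much memory is still available for KFD allocations on the
 * given partition: the partition's VRAM size minus what KFD and pinning
 * already use and minus the page-table and RAS reservations. On APUs that
 * prefer GTT the result is additionally capped by the system and TTM
 * memory limits. Negative results are clamped to zero.
 */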
16181c77527aSMukul Joshi size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
16191c77527aSMukul Joshi uint8_t xcp_id)
16209731dd4cSDaniel Phillips {
16219731dd4cSDaniel Phillips uint64_t reserved_for_pt =
16229731dd4cSDaniel Phillips ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
1623473af28dSHawking Zhang struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1624473af28dSHawking Zhang uint64_t reserved_for_ras = (con ? con->reserved_pages_in_bytes : 0);
16257cb3cfc0SDaniel Phillips ssize_t available;
16261c77527aSMukul Joshi uint64_t vram_available, system_mem_available, ttm_mem_available;
1627e7c94bfbSJingyu Wang
16289731dd4cSDaniel Phillips spin_lock(&kfd_mem_limit.mem_limit_lock);
16291c77527aSMukul Joshi vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id)
16301c77527aSMukul Joshi - adev->kfd.vram_used_aligned[xcp_id]
16319731dd4cSDaniel Phillips - atomic64_read(&adev->vram_pin_size)
1632473af28dSHawking Zhang - reserved_for_pt
1633473af28dSHawking Zhang - reserved_for_ras;
16341c77527aSMukul Joshi
16358b0d068eSAlex Deucher if (adev->apu_prefer_gtt) {
16361c77527aSMukul Joshi system_mem_available = no_system_mem_limit ?
16371c77527aSMukul Joshi kfd_mem_limit.max_system_mem_limit :
16381c77527aSMukul Joshi kfd_mem_limit.max_system_mem_limit -
16391c77527aSMukul Joshi kfd_mem_limit.system_mem_used;
16401c77527aSMukul Joshi
16411c77527aSMukul Joshi ttm_mem_available = kfd_mem_limit.max_ttm_mem_limit -
16421c77527aSMukul Joshi kfd_mem_limit.ttm_mem_used;
16431c77527aSMukul Joshi
16441c77527aSMukul Joshi available = min3(system_mem_available, ttm_mem_available,
16451c77527aSMukul Joshi vram_available);
16461c77527aSMukul Joshi available = ALIGN_DOWN(available, PAGE_SIZE);
16471c77527aSMukul Joshi } else {
16481c77527aSMukul Joshi available = ALIGN_DOWN(vram_available, VRAM_AVAILABLITY_ALIGN);
16491c77527aSMukul Joshi }
16501c77527aSMukul Joshi
16519731dd4cSDaniel Phillips spin_unlock(&kfd_mem_limit.mem_limit_lock);
16529731dd4cSDaniel Phillips
16537cb3cfc0SDaniel Phillips if (available < 0)
16547cb3cfc0SDaniel Phillips available = 0;
16557cb3cfc0SDaniel Phillips
16561c77527aSMukul Joshi return available;
16579731dd4cSDaniel Phillips }
16589731dd4cSDaniel Phillips
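/* Allocate a buffer object for a KFD process.
 *
 * The allocation flags select the placement: VRAM (GTT on APUs that prefer
 * GTT), GTT, a userptr BO backed by user pages, or an SG BO wrapping
 * doorbell/MMIO pages. Userptr BOs get their pages set up via
 * init_user_pages(); doorbell/MMIO BOs are pinned in GTT right away.
 */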
1659a46a2cd1SFelix Kuehling int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1660dff63da9SGraham Sider struct amdgpu_device *adev, uint64_t va, uint64_t size,
1661b40a6ab2SFelix Kuehling void *drm_priv, struct kgd_mem **mem,
1662011bbb03SRajneesh Bhardwaj uint64_t *offset, uint32_t flags, bool criu_resume)
1663a46a2cd1SFelix Kuehling {
1664b40a6ab2SFelix Kuehling struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
16653ebfd221SPhilip Yang struct amdgpu_fpriv *fpriv = container_of(avm, struct amdgpu_fpriv, vm);
1666b408a548SFelix Kuehling enum ttm_bo_type bo_type = ttm_bo_type_device;
1667b408a548SFelix Kuehling struct sg_table *sg = NULL;
16685ae0283eSFelix Kuehling uint64_t user_addr = 0;
1669a46a2cd1SFelix Kuehling struct amdgpu_bo *bo;
1670a872c152SPhilip Yang struct drm_gem_object *gobj = NULL;
16715ae0283eSFelix Kuehling u32 domain, alloc_domain;
16720c2dece8SPhilip Yang uint64_t aligned_size;
16733ebfd221SPhilip Yang int8_t xcp_id = -1;
1674a46a2cd1SFelix Kuehling u64 alloc_flags;
1675a46a2cd1SFelix Kuehling int ret;
1676a46a2cd1SFelix Kuehling
1677a46a2cd1SFelix Kuehling /*
1678a46a2cd1SFelix Kuehling * Check on which domain to allocate BO
1679a46a2cd1SFelix Kuehling */
16801d251d90SYong Zhao if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
16815ae0283eSFelix Kuehling domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1682970c1646SRajneesh Bhardwaj
16838b0d068eSAlex Deucher if (adev->apu_prefer_gtt) {
1684970c1646SRajneesh Bhardwaj domain = AMDGPU_GEM_DOMAIN_GTT;
168553c5692eSPhilip Yang alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1686970c1646SRajneesh Bhardwaj alloc_flags = 0;
1687970c1646SRajneesh Bhardwaj } else {
16886856e4b6SFelix Kuehling alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
16891d251d90SYong Zhao alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
169099e7d65cSFelix Kuehling AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
1691155ce502SPhilip Yang
1692155ce502SPhilip Yang /* For contiguous VRAM allocation */
1693155ce502SPhilip Yang if (flags & KFD_IOC_ALLOC_MEM_FLAGS_CONTIGUOUS)
1694155ce502SPhilip Yang alloc_flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1695970c1646SRajneesh Bhardwaj }
169618cf073fSGuchun Chen xcp_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ?
169718cf073fSGuchun Chen 0 : fpriv->xcp_id;
16981d251d90SYong Zhao } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
16995ae0283eSFelix Kuehling domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1700a46a2cd1SFelix Kuehling alloc_flags = 0;
17017226f40aSLang Yu } else {
17025ae0283eSFelix Kuehling domain = AMDGPU_GEM_DOMAIN_GTT;
17035ae0283eSFelix Kuehling alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
17045bb19893SFelix Kuehling alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
17057226f40aSLang Yu
17067226f40aSLang Yu if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
17075ae0283eSFelix Kuehling if (!offset || !*offset)
17085ae0283eSFelix Kuehling return -EINVAL;
170935f3fc87SAndrey Konovalov user_addr = untagged_addr(*offset);
17101d251d90SYong Zhao } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
17111d251d90SYong Zhao KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1712b408a548SFelix Kuehling bo_type = ttm_bo_type_sg;
1713b408a548SFelix Kuehling if (size > UINT_MAX)
1714b408a548SFelix Kuehling return -EINVAL;
171508a2fd23SRamesh Errabolu sg = create_sg_table(*offset, size);
1716b408a548SFelix Kuehling if (!sg)
1717b408a548SFelix Kuehling return -ENOMEM;
1718a46a2cd1SFelix Kuehling } else {
1719a46a2cd1SFelix Kuehling return -EINVAL;
1720a46a2cd1SFelix Kuehling }
17217226f40aSLang Yu }
1722a46a2cd1SFelix Kuehling
1723d1a372afSFelix Kuehling if (flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT)
1724d1a372afSFelix Kuehling alloc_flags |= AMDGPU_GEM_CREATE_COHERENT;
17255f248462SDavid Francis if (flags & KFD_IOC_ALLOC_MEM_FLAGS_EXT_COHERENT)
17265f248462SDavid Francis alloc_flags |= AMDGPU_GEM_CREATE_EXT_COHERENT;
1727d1a372afSFelix Kuehling if (flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED)
1728d1a372afSFelix Kuehling alloc_flags |= AMDGPU_GEM_CREATE_UNCACHED;
1729d1a372afSFelix Kuehling
1730a46a2cd1SFelix Kuehling *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1731b408a548SFelix Kuehling if (!*mem) {
1732b408a548SFelix Kuehling ret = -ENOMEM;
1733b408a548SFelix Kuehling goto err;
1734b408a548SFelix Kuehling }
1735c780b2eeSFelix Kuehling INIT_LIST_HEAD(&(*mem)->attachments);
1736a46a2cd1SFelix Kuehling mutex_init(&(*mem)->lock);
17371d251d90SYong Zhao (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1738a46a2cd1SFelix Kuehling
1739a46a2cd1SFelix Kuehling /* Workaround for AQL queue wraparound bug. Map the same
1740a46a2cd1SFelix Kuehling * memory twice. That means we only actually allocate half
1741a46a2cd1SFelix Kuehling * the memory.
1742a46a2cd1SFelix Kuehling */
1743a46a2cd1SFelix Kuehling if ((*mem)->aql_queue)
17440c2dece8SPhilip Yang size >>= 1;
17450c2dece8SPhilip Yang aligned_size = PAGE_ALIGN(size);
1746a46a2cd1SFelix Kuehling
1747d0ba51b1SFelix Kuehling (*mem)->alloc_flags = flags;
1748a46a2cd1SFelix Kuehling
1749a46a2cd1SFelix Kuehling amdgpu_sync_create(&(*mem)->sync);
1750a46a2cd1SFelix Kuehling
17511c77527aSMukul Joshi ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags,
17521c77527aSMukul Joshi xcp_id);
1753a46a2cd1SFelix Kuehling if (ret) {
1754325f4b59SYifan Zhang pr_debug("Insufficient memory\n");
17555d240da9SEric Huang goto err_reserve_limit;
1756a46a2cd1SFelix Kuehling }
1757a46a2cd1SFelix Kuehling
17583ebfd221SPhilip Yang pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s xcp_id %d\n",
175953c5692eSPhilip Yang va, (*mem)->aql_queue ? size << 1 : size,
17603ebfd221SPhilip Yang domain_string(alloc_domain), xcp_id);
1761a46a2cd1SFelix Kuehling
17620c2dece8SPhilip Yang ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags,
17633ebfd221SPhilip Yang bo_type, NULL, &gobj, xcp_id + 1);
1764a46a2cd1SFelix Kuehling if (ret) {
1765a46a2cd1SFelix Kuehling pr_debug("Failed to create BO on domain %s. ret %d\n",
1766a46a2cd1SFelix Kuehling domain_string(alloc_domain), ret);
1767a46a2cd1SFelix Kuehling goto err_bo_create;
1768a46a2cd1SFelix Kuehling }
1769d4ec4bdcSFelix Kuehling ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
1770d4ec4bdcSFelix Kuehling if (ret) {
1771d4ec4bdcSFelix Kuehling pr_debug("Failed to allow vma node access. ret %d\n", ret);
1772d4ec4bdcSFelix Kuehling goto err_node_allow;
1773d4ec4bdcSFelix Kuehling }
177418192001SFelix Kuehling ret = drm_gem_handle_create(adev->kfd.client.file, gobj, &(*mem)->gem_handle);
177518192001SFelix Kuehling if (ret)
177618192001SFelix Kuehling goto err_gem_handle_create;
1777875440fdSHuang Rui bo = gem_to_amdgpu_bo(gobj);
1778b408a548SFelix Kuehling if (bo_type == ttm_bo_type_sg) {
1779b408a548SFelix Kuehling bo->tbo.sg = sg;
1780b408a548SFelix Kuehling bo->tbo.ttm->sg = sg;
1781b408a548SFelix Kuehling }
1782a46a2cd1SFelix Kuehling bo->kfd_bo = *mem;
1783a46a2cd1SFelix Kuehling (*mem)->bo = bo;
17845ae0283eSFelix Kuehling if (user_addr)
1785f04c79cfSAlex Sierra bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;
1786a46a2cd1SFelix Kuehling
1787a46a2cd1SFelix Kuehling (*mem)->va = va;
17885ae0283eSFelix Kuehling (*mem)->domain = domain;
1789a46a2cd1SFelix Kuehling (*mem)->mapped_to_gpu_memory = 0;
17905b21d3e5SFelix Kuehling (*mem)->process_info = avm->process_info;
1791970c1646SRajneesh Bhardwaj
17925ae0283eSFelix Kuehling add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
17935ae0283eSFelix Kuehling
17945ae0283eSFelix Kuehling if (user_addr) {
1795fb0a0625SYifan Zhang pr_debug("creating userptr BO for user_addr = %llx\n", user_addr);
1796011bbb03SRajneesh Bhardwaj ret = init_user_pages(*mem, user_addr, criu_resume);
179771efab6aSOak Zeng if (ret)
17985ae0283eSFelix Kuehling goto allocate_init_user_pages_failed;
1799a899fe8bSPhilip Yang } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1800d25e35bcSRamesh Errabolu KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1801d25e35bcSRamesh Errabolu ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT);
1802d25e35bcSRamesh Errabolu if (ret) {
1803d25e35bcSRamesh Errabolu pr_err("Pinning MMIO/DOORBELL BO during ALLOC FAILED\n");
1804d25e35bcSRamesh Errabolu goto err_pin_bo;
1805d25e35bcSRamesh Errabolu }
1806d25e35bcSRamesh Errabolu bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
1807d25e35bcSRamesh Errabolu bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
18080e2e7c5bSFelix Kuehling } else {
18090e2e7c5bSFelix Kuehling mutex_lock(&avm->process_info->lock);
18100e2e7c5bSFelix Kuehling if (avm->process_info->eviction_fence &&
18110e2e7c5bSFelix Kuehling !dma_fence_is_signaled(&avm->process_info->eviction_fence->base))
18120e2e7c5bSFelix Kuehling ret = amdgpu_amdkfd_bo_validate_and_fence(bo, domain,
18130e2e7c5bSFelix Kuehling &avm->process_info->eviction_fence->base);
18140e2e7c5bSFelix Kuehling mutex_unlock(&avm->process_info->lock);
18150e2e7c5bSFelix Kuehling if (ret)
18160e2e7c5bSFelix Kuehling goto err_validate_bo;
1817d25e35bcSRamesh Errabolu }
1818d25e35bcSRamesh Errabolu
1819a899fe8bSPhilip Yang if (offset)
1820a899fe8bSPhilip Yang *offset = amdgpu_bo_mmap_offset(bo);
1821a899fe8bSPhilip Yang
1822a46a2cd1SFelix Kuehling return 0;
1823a46a2cd1SFelix Kuehling
18245ae0283eSFelix Kuehling allocate_init_user_pages_failed:
1825d25e35bcSRamesh Errabolu err_pin_bo:
18260e2e7c5bSFelix Kuehling err_validate_bo:
1827a899fe8bSPhilip Yang remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
182818192001SFelix Kuehling drm_gem_handle_delete(adev->kfd.client.file, (*mem)->gem_handle);
182918192001SFelix Kuehling err_gem_handle_create:
1830d4ec4bdcSFelix Kuehling drm_vma_node_revoke(&gobj->vma_node, drm_priv);
1831d4ec4bdcSFelix Kuehling err_node_allow:
18325ae0283eSFelix Kuehling /* Don't unreserve system mem limit twice */
18335d240da9SEric Huang goto err_reserve_limit;
1834a46a2cd1SFelix Kuehling err_bo_create:
18351c77527aSMukul Joshi amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id);
18365d240da9SEric Huang err_reserve_limit:
1837e53a1713SMukul Joshi amdgpu_sync_free(&(*mem)->sync);
1838a46a2cd1SFelix Kuehling mutex_destroy(&(*mem)->lock);
1839a872c152SPhilip Yang if (gobj)
1840a872c152SPhilip Yang drm_gem_object_put(gobj);
1841a872c152SPhilip Yang else
1842a46a2cd1SFelix Kuehling kfree(*mem);
1843b408a548SFelix Kuehling err:
1844b408a548SFelix Kuehling if (sg) {
1845b408a548SFelix Kuehling sg_free_table(sg);
1846b408a548SFelix Kuehling kfree(sg);
1847b408a548SFelix Kuehling }
1848a46a2cd1SFelix Kuehling return ret;
1849a46a2cd1SFelix Kuehling }
1850a46a2cd1SFelix Kuehling
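/* Free a buffer object previously allocated for a KFD process.
 *
 * Fails with -EBUSY while the BO is still mapped on any GPU. Otherwise
 * doorbell/MMIO BOs are unpinned, userptr pages and MMU notifiers are
 * cleaned up, all per-GPU attachments are detached, and the final GEM
 * reference is dropped last, which may free the kgd_mem through
 * amdgpu_amdkfd_release_notify().
 */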
1851a46a2cd1SFelix Kuehling int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1852dff63da9SGraham Sider struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
1853d4ec4bdcSFelix Kuehling uint64_t *size)
1854a46a2cd1SFelix Kuehling {
1855a46a2cd1SFelix Kuehling struct amdkfd_process_info *process_info = mem->process_info;
18568c392cd5SDaniel Vetter unsigned long bo_size = mem->bo->tbo.base.size;
18570dc204bcSLang Yu bool use_release_notifier = (mem->bo->kfd_bo == mem);
1858c780b2eeSFelix Kuehling struct kfd_mem_attachment *entry, *tmp;
1859a46a2cd1SFelix Kuehling struct bo_vm_reservation_context ctx;
1860fe158997SBernard Zhao unsigned int mapped_to_gpu_memory;
1861a46a2cd1SFelix Kuehling int ret;
186244ea03e1SSumera Priyadarsini bool is_imported = false;
1863a46a2cd1SFelix Kuehling
1864a46a2cd1SFelix Kuehling mutex_lock(&mem->lock);
1865d25e35bcSRamesh Errabolu
18666bd8d4b7SJulia Lawall /* Unpin MMIO/DOORBELL BOs that were pinned during allocation */
1867d25e35bcSRamesh Errabolu if (mem->alloc_flags &
1868d25e35bcSRamesh Errabolu (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1869d25e35bcSRamesh Errabolu KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1870d25e35bcSRamesh Errabolu amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo);
1871d25e35bcSRamesh Errabolu }
1872d25e35bcSRamesh Errabolu
1873fe158997SBernard Zhao mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1874d4566deeSMukul Joshi is_imported = mem->is_imported;
1875a46a2cd1SFelix Kuehling mutex_unlock(&mem->lock);
1876a46a2cd1SFelix Kuehling /* lock is not needed after this, since mem is unused and will
1877a46a2cd1SFelix Kuehling * be freed anyway
1878a46a2cd1SFelix Kuehling */
1879a46a2cd1SFelix Kuehling
1880fe158997SBernard Zhao if (mapped_to_gpu_memory > 0) {
1881fe158997SBernard Zhao pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1882fe158997SBernard Zhao mem->va, bo_size);
1883fe158997SBernard Zhao return -EBUSY;
1884fe158997SBernard Zhao }
1885fe158997SBernard Zhao
1886a46a2cd1SFelix Kuehling /* Make sure restore workers don't access the BO any more */
1887a46a2cd1SFelix Kuehling mutex_lock(&process_info->lock);
18888abc1eb2SChristian König list_del(&mem->validate_list);
1889a46a2cd1SFelix Kuehling mutex_unlock(&process_info->lock);
1890a46a2cd1SFelix Kuehling
1891f95f51a4SFelix Kuehling /* Cleanup user pages and MMU notifiers */
1892f95f51a4SFelix Kuehling if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
1893d9483ecdSChristian König amdgpu_hmm_unregister(mem->bo);
1894f95f51a4SFelix Kuehling mutex_lock(&process_info->notifier_lock);
1895f95f51a4SFelix Kuehling amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range);
1896f95f51a4SFelix Kuehling mutex_unlock(&process_info->notifier_lock);
1897f95f51a4SFelix Kuehling }
1898f7646585SPhilip Yang
1899a46a2cd1SFelix Kuehling ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1900a46a2cd1SFelix Kuehling if (unlikely(ret))
1901a46a2cd1SFelix Kuehling return ret;
1902a46a2cd1SFelix Kuehling
1903a46a2cd1SFelix Kuehling amdgpu_amdkfd_remove_eviction_fence(mem->bo,
19042d086fdeSFelix Kuehling process_info->eviction_fence);
1905a46a2cd1SFelix Kuehling pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1906a46a2cd1SFelix Kuehling mem->va + bo_size * (1 + mem->aql_queue));
1907a46a2cd1SFelix Kuehling
1908a46a2cd1SFelix Kuehling /* Remove from VM internal data structures */
1909101b8104SPhilip Yang list_for_each_entry_safe(entry, tmp, &mem->attachments, list) {
1910101b8104SPhilip Yang kfd_mem_dmaunmap_attachment(mem, entry);
1911c780b2eeSFelix Kuehling kfd_mem_detach(entry);
1912101b8104SPhilip Yang }
1913a46a2cd1SFelix Kuehling
19147ef6b7f8SKent Russell ret = unreserve_bo_and_vms(&ctx, false, false);
19157ef6b7f8SKent Russell
1916a46a2cd1SFelix Kuehling /* Free the sync object */
1917a46a2cd1SFelix Kuehling amdgpu_sync_free(&mem->sync);
1918a46a2cd1SFelix Kuehling
1919d8e408a8SOak Zeng /* If the SG is not NULL, it's one we created for a doorbell or mmio
1920d8e408a8SOak Zeng * remap BO. We need to free it.
1921b408a548SFelix Kuehling */
1922b408a548SFelix Kuehling if (mem->bo->tbo.sg) {
1923b408a548SFelix Kuehling sg_free_table(mem->bo->tbo.sg);
1924b408a548SFelix Kuehling kfree(mem->bo->tbo.sg);
1925b408a548SFelix Kuehling }
1926b408a548SFelix Kuehling
1927d4566deeSMukul Joshi /* Update the size of the BO being freed if it was allocated from
1928f915f3afSHarish Kasiviswanathan * VRAM and is not imported. On APUs that prefer GTT, VRAM allocations
1929f915f3afSHarish Kasiviswanathan * are done in the GTT domain.
1930d4566deeSMukul Joshi */
1931d4566deeSMukul Joshi if (size) {
1932f915f3afSHarish Kasiviswanathan if (!is_imported &&
1933f915f3afSHarish Kasiviswanathan (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
19348b0d068eSAlex Deucher (adev->apu_prefer_gtt &&
1935f915f3afSHarish Kasiviswanathan mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
1936d4566deeSMukul Joshi *size = bo_size;
1937d4566deeSMukul Joshi else
1938d4566deeSMukul Joshi *size = 0;
1939d4566deeSMukul Joshi }
1940d4566deeSMukul Joshi
1941a46a2cd1SFelix Kuehling /* Free the BO*/
1942d4ec4bdcSFelix Kuehling drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
194318192001SFelix Kuehling drm_gem_handle_delete(adev->kfd.client.file, mem->gem_handle);
194418192001SFelix Kuehling if (mem->dmabuf) {
19455ac3c3e4SFelix Kuehling dma_buf_put(mem->dmabuf);
194618192001SFelix Kuehling mem->dmabuf = NULL;
194718192001SFelix Kuehling }
1948a46a2cd1SFelix Kuehling mutex_destroy(&mem->lock);
19495702d052SFelix Kuehling
19505702d052SFelix Kuehling /* If this releases the last reference, it will end up calling
19515702d052SFelix Kuehling * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why
19525702d052SFelix Kuehling * this needs to be the last call here.
19535702d052SFelix Kuehling */
19545702d052SFelix Kuehling drm_gem_object_put(&mem->bo->tbo.base);
1955a46a2cd1SFelix Kuehling
19560dc204bcSLang Yu /*
19570dc204bcSLang Yu * For kgd_mem allocated in amdgpu_amdkfd_gpuvm_import_dmabuf(),
19580dc204bcSLang Yu * explicitly free it here.
19590dc204bcSLang Yu */
19600dc204bcSLang Yu if (!use_release_notifier)
19610dc204bcSLang Yu kfree(mem);
19620dc204bcSLang Yu
1963a46a2cd1SFelix Kuehling return ret;
1964a46a2cd1SFelix Kuehling }
1965a46a2cd1SFelix Kuehling
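/* Map a KFD BO into the GPUVM of one GPU.
 *
 * Attaches the BO to the VM on first use, validates the page tables, maps
 * every not-yet-mapped attachment of this VM and bumps
 * mapped_to_gpu_memory. For userptr BOs that are not validated yet the
 * page-table update is left to the userptr restore worker.
 */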
1966a46a2cd1SFelix Kuehling int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1967dff63da9SGraham Sider struct amdgpu_device *adev, struct kgd_mem *mem,
19684d30a83cSChristian König void *drm_priv)
1969a46a2cd1SFelix Kuehling {
1970b40a6ab2SFelix Kuehling struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1971a46a2cd1SFelix Kuehling int ret;
1972a46a2cd1SFelix Kuehling struct amdgpu_bo *bo;
1973a46a2cd1SFelix Kuehling uint32_t domain;
1974c780b2eeSFelix Kuehling struct kfd_mem_attachment *entry;
1975a46a2cd1SFelix Kuehling struct bo_vm_reservation_context ctx;
1976a46a2cd1SFelix Kuehling unsigned long bo_size;
19775ae0283eSFelix Kuehling bool is_invalid_userptr = false;
1978a46a2cd1SFelix Kuehling
19795ae0283eSFelix Kuehling bo = mem->bo;
19805ae0283eSFelix Kuehling if (!bo) {
19815ae0283eSFelix Kuehling pr_err("Invalid BO when mapping memory to GPU\n");
19825ae0283eSFelix Kuehling return -EINVAL;
19835ae0283eSFelix Kuehling }
19845ae0283eSFelix Kuehling
19855ae0283eSFelix Kuehling /* Make sure restore is not running concurrently. Since we
19865ae0283eSFelix Kuehling * don't map invalid userptr BOs, we rely on the next restore
19875ae0283eSFelix Kuehling * worker to do the mapping
1988a46a2cd1SFelix Kuehling */
1989a46a2cd1SFelix Kuehling mutex_lock(&mem->process_info->lock);
1990a46a2cd1SFelix Kuehling
1991f95f51a4SFelix Kuehling /* Lock notifier lock. If we find an invalid userptr BO, we can be
1992f95f51a4SFelix Kuehling * sure that the MMU notifier is no longer running
1993f95f51a4SFelix Kuehling * concurrently and the queues are actually stopped
1994f95f51a4SFelix Kuehling */
1995f95f51a4SFelix Kuehling if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1996f95f51a4SFelix Kuehling mutex_lock(&mem->process_info->notifier_lock);
1997f95f51a4SFelix Kuehling is_invalid_userptr = !!mem->invalid;
1998f95f51a4SFelix Kuehling mutex_unlock(&mem->process_info->notifier_lock);
1999f95f51a4SFelix Kuehling }
2000f95f51a4SFelix Kuehling
20015ae0283eSFelix Kuehling mutex_lock(&mem->lock);
20025ae0283eSFelix Kuehling
2003a46a2cd1SFelix Kuehling domain = mem->domain;
20048c392cd5SDaniel Vetter bo_size = bo->tbo.base.size;
2005a46a2cd1SFelix Kuehling
2006a46a2cd1SFelix Kuehling pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
2007a46a2cd1SFelix Kuehling mem->va,
2008a46a2cd1SFelix Kuehling mem->va + bo_size * (1 + mem->aql_queue),
2009b40a6ab2SFelix Kuehling avm, domain_string(domain));
2010a46a2cd1SFelix Kuehling
20119e5d2753SFelix Kuehling if (!kfd_mem_is_attached(avm, mem)) {
20129e5d2753SFelix Kuehling ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
20139e5d2753SFelix Kuehling if (ret)
20149e5d2753SFelix Kuehling goto out;
20159e5d2753SFelix Kuehling }
20169e5d2753SFelix Kuehling
2017b40a6ab2SFelix Kuehling ret = reserve_bo_and_vm(mem, avm, &ctx);
2018a46a2cd1SFelix Kuehling if (unlikely(ret))
2019a46a2cd1SFelix Kuehling goto out;
2020a46a2cd1SFelix Kuehling
20215ae0283eSFelix Kuehling /* Userptr can be marked as "not invalid", but not actually be
20225ae0283eSFelix Kuehling * validated yet (still in the system domain). In that case
20235ae0283eSFelix Kuehling * the queues are still stopped and we can leave mapping for
20245ae0283eSFelix Kuehling * the next restore worker
20255ae0283eSFelix Kuehling */
20260f04e538SFelix Kuehling if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
2027d3116756SChristian König bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
20285ae0283eSFelix Kuehling is_invalid_userptr = true;
20295ae0283eSFelix Kuehling
203050661eb1SFelix Kuehling ret = vm_validate_pt_pd_bos(avm, NULL);
2031a46a2cd1SFelix Kuehling if (unlikely(ret))
20329e5d2753SFelix Kuehling goto out_unreserve;
2033a46a2cd1SFelix Kuehling
2034c780b2eeSFelix Kuehling list_for_each_entry(entry, &mem->attachments, list) {
2035c780b2eeSFelix Kuehling if (entry->bo_va->base.vm != avm || entry->is_mapped)
2036c780b2eeSFelix Kuehling continue;
2037c780b2eeSFelix Kuehling
2038a46a2cd1SFelix Kuehling pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
2039c780b2eeSFelix Kuehling entry->va, entry->va + bo_size, entry);
2040a46a2cd1SFelix Kuehling
2041b72ed8a2SFelix Kuehling ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
20424d30a83cSChristian König is_invalid_userptr);
2043a46a2cd1SFelix Kuehling if (ret) {
20440d87c9cfSKent Russell pr_err("Failed to map bo to gpuvm\n");
20459e5d2753SFelix Kuehling goto out_unreserve;
2046a46a2cd1SFelix Kuehling }
2047a46a2cd1SFelix Kuehling
2048b40a6ab2SFelix Kuehling ret = vm_update_pds(avm, ctx.sync);
2049a46a2cd1SFelix Kuehling if (ret) {
2050a46a2cd1SFelix Kuehling pr_err("Failed to update page directories\n");
20519e5d2753SFelix Kuehling goto out_unreserve;
2052a46a2cd1SFelix Kuehling }
2053a46a2cd1SFelix Kuehling
2054a46a2cd1SFelix Kuehling entry->is_mapped = true;
2055a46a2cd1SFelix Kuehling mem->mapped_to_gpu_memory++;
2056a46a2cd1SFelix Kuehling pr_debug("\t INC mapping count %d\n",
2057a46a2cd1SFelix Kuehling mem->mapped_to_gpu_memory);
2058a46a2cd1SFelix Kuehling }
2059a46a2cd1SFelix Kuehling
2060a46a2cd1SFelix Kuehling ret = unreserve_bo_and_vms(&ctx, false, false);
2061a46a2cd1SFelix Kuehling
2062a46a2cd1SFelix Kuehling goto out;
2063a46a2cd1SFelix Kuehling
20649e5d2753SFelix Kuehling out_unreserve:
2065a46a2cd1SFelix Kuehling unreserve_bo_and_vms(&ctx, false, false);
2066a46a2cd1SFelix Kuehling out:
2067a46a2cd1SFelix Kuehling mutex_unlock(&mem->process_info->lock);
2068a46a2cd1SFelix Kuehling mutex_unlock(&mem->lock);
2069a46a2cd1SFelix Kuehling return ret;
2070a46a2cd1SFelix Kuehling }
2071a46a2cd1SFelix Kuehling
20720c93bd49SLang Yu int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
2073101b8104SPhilip Yang {
2074101b8104SPhilip Yang struct kfd_mem_attachment *entry;
2075101b8104SPhilip Yang struct amdgpu_vm *vm;
20760c93bd49SLang Yu int ret;
2077101b8104SPhilip Yang
2078101b8104SPhilip Yang vm = drm_priv_to_vm(drm_priv);
2079101b8104SPhilip Yang
2080101b8104SPhilip Yang mutex_lock(&mem->lock);
2081101b8104SPhilip Yang
20820c93bd49SLang Yu ret = amdgpu_bo_reserve(mem->bo, true);
20830c93bd49SLang Yu if (ret)
20840c93bd49SLang Yu goto out;
20850c93bd49SLang Yu
2086101b8104SPhilip Yang list_for_each_entry(entry, &mem->attachments, list) {
20870c93bd49SLang Yu if (entry->bo_va->base.vm != vm)
20880c93bd49SLang Yu continue;
20890c93bd49SLang Yu if (entry->bo_va->base.bo->tbo.ttm &&
20900c93bd49SLang Yu !entry->bo_va->base.bo->tbo.ttm->sg)
20910c93bd49SLang Yu continue;
20920c93bd49SLang Yu
2093101b8104SPhilip Yang kfd_mem_dmaunmap_attachment(mem, entry);
2094101b8104SPhilip Yang }
2095101b8104SPhilip Yang
20960c93bd49SLang Yu amdgpu_bo_unreserve(mem->bo);
20970c93bd49SLang Yu out:
2098101b8104SPhilip Yang mutex_unlock(&mem->lock);
20990c93bd49SLang Yu
21000c93bd49SLang Yu return ret;
2101101b8104SPhilip Yang }
2102101b8104SPhilip Yang
2103a46a2cd1SFelix Kuehling int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
2104dff63da9SGraham Sider struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
2105a46a2cd1SFelix Kuehling {
2106b40a6ab2SFelix Kuehling struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
21078c392cd5SDaniel Vetter unsigned long bo_size = mem->bo->tbo.base.size;
2108c780b2eeSFelix Kuehling struct kfd_mem_attachment *entry;
2109a46a2cd1SFelix Kuehling struct bo_vm_reservation_context ctx;
2110a46a2cd1SFelix Kuehling int ret;
2111a46a2cd1SFelix Kuehling
2112a46a2cd1SFelix Kuehling mutex_lock(&mem->lock);
2113a46a2cd1SFelix Kuehling
2114b40a6ab2SFelix Kuehling ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
2115a46a2cd1SFelix Kuehling if (unlikely(ret))
2116a46a2cd1SFelix Kuehling goto out;
2117a46a2cd1SFelix Kuehling /* If no VMs were reserved, it means the BO wasn't actually mapped */
2118a46a2cd1SFelix Kuehling if (ctx.n_vms == 0) {
2119a46a2cd1SFelix Kuehling ret = -EINVAL;
2120a46a2cd1SFelix Kuehling goto unreserve_out;
2121a46a2cd1SFelix Kuehling }
2122a46a2cd1SFelix Kuehling
212350661eb1SFelix Kuehling ret = vm_validate_pt_pd_bos(avm, NULL);
2124a46a2cd1SFelix Kuehling if (unlikely(ret))
2125a46a2cd1SFelix Kuehling goto unreserve_out;
2126a46a2cd1SFelix Kuehling
2127a46a2cd1SFelix Kuehling pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
2128a46a2cd1SFelix Kuehling mem->va,
2129a46a2cd1SFelix Kuehling mem->va + bo_size * (1 + mem->aql_queue),
2130b40a6ab2SFelix Kuehling avm);
2131a46a2cd1SFelix Kuehling
2132c780b2eeSFelix Kuehling list_for_each_entry(entry, &mem->attachments, list) {
2133c780b2eeSFelix Kuehling if (entry->bo_va->base.vm != avm || !entry->is_mapped)
2134c780b2eeSFelix Kuehling continue;
2135c780b2eeSFelix Kuehling
2136a46a2cd1SFelix Kuehling pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
2137c780b2eeSFelix Kuehling entry->va, entry->va + bo_size, entry);
2138a46a2cd1SFelix Kuehling
2139834368eaSPhilip Yang ret = unmap_bo_from_gpuvm(mem, entry, ctx.sync);
2140834368eaSPhilip Yang if (ret)
2141834368eaSPhilip Yang goto unreserve_out;
2142834368eaSPhilip Yang
2143a46a2cd1SFelix Kuehling entry->is_mapped = false;
2144a46a2cd1SFelix Kuehling
2145a46a2cd1SFelix Kuehling mem->mapped_to_gpu_memory--;
2146a46a2cd1SFelix Kuehling pr_debug("\t DEC mapping count %d\n",
2147a46a2cd1SFelix Kuehling mem->mapped_to_gpu_memory);
2148a46a2cd1SFelix Kuehling }
2149a46a2cd1SFelix Kuehling
2150a46a2cd1SFelix Kuehling unreserve_out:
2151a46a2cd1SFelix Kuehling unreserve_bo_and_vms(&ctx, false, false);
2152a46a2cd1SFelix Kuehling out:
2153a46a2cd1SFelix Kuehling mutex_unlock(&mem->lock);
2154a46a2cd1SFelix Kuehling return ret;
2155a46a2cd1SFelix Kuehling }
2156a46a2cd1SFelix Kuehling
2157a46a2cd1SFelix Kuehling int amdgpu_amdkfd_gpuvm_sync_memory(
2158dff63da9SGraham Sider struct amdgpu_device *adev, struct kgd_mem *mem, bool intr)
2159a46a2cd1SFelix Kuehling {
2160a46a2cd1SFelix Kuehling struct amdgpu_sync sync;
2161a46a2cd1SFelix Kuehling int ret;
2162a46a2cd1SFelix Kuehling
2163a46a2cd1SFelix Kuehling amdgpu_sync_create(&sync);
2164a46a2cd1SFelix Kuehling
2165a46a2cd1SFelix Kuehling mutex_lock(&mem->lock);
2166a46a2cd1SFelix Kuehling amdgpu_sync_clone(&mem->sync, &sync);
2167a46a2cd1SFelix Kuehling mutex_unlock(&mem->lock);
2168a46a2cd1SFelix Kuehling
2169a46a2cd1SFelix Kuehling ret = amdgpu_sync_wait(&sync, intr);
2170a46a2cd1SFelix Kuehling amdgpu_sync_free(&sync);
2171a46a2cd1SFelix Kuehling return ret;
2172a46a2cd1SFelix Kuehling }
2173a46a2cd1SFelix Kuehling
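/*
 * Illustrative usage sketch (not taken from a real caller; 'r', 'adev',
 * 'mem' and 'drm_priv' are assumed to be in scope): the common pattern is
 * to map a BO and then wait for the page table updates to land before the
 * GPU is allowed to access it.
 *
 *	r = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(adev, mem, drm_priv);
 *	if (!r)
 *		r = amdgpu_amdkfd_gpuvm_sync_memory(adev, mem, true);
 */
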
2174e77a541fSGraham Sider /**
2175e77a541fSGraham Sider * amdgpu_amdkfd_map_gtt_bo_to_gart - Map BO to GART and increment reference count
2176e77a541fSGraham Sider * @bo: Buffer object to be mapped
2177fb910658SPhilip Yang * @bo_gart: Returned BO reference
2178e77a541fSGraham Sider *
2179e77a541fSGraham Sider * Before returning, the BO reference count is incremented. To release the
2180e77a541fSGraham Sider * reference and unpin/unmap the BO, call amdgpu_amdkfd_free_gtt_mem.
2181e77a541fSGraham Sider */
2182fb910658SPhilip Yang int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo, struct amdgpu_bo **bo_gart)
2183e77a541fSGraham Sider {
2184e77a541fSGraham Sider int ret;
2185e77a541fSGraham Sider
2186e77a541fSGraham Sider ret = amdgpu_bo_reserve(bo, true);
2187e77a541fSGraham Sider if (ret) {
2188e77a541fSGraham Sider pr_err("Failed to reserve bo. ret %d\n", ret);
2189e77a541fSGraham Sider goto err_reserve_bo_failed;
2190e77a541fSGraham Sider }
2191e77a541fSGraham Sider
2192e77a541fSGraham Sider ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
2193e77a541fSGraham Sider if (ret) {
2194e77a541fSGraham Sider pr_err("Failed to pin bo. ret %d\n", ret);
2195e77a541fSGraham Sider goto err_pin_bo_failed;
2196e77a541fSGraham Sider }
2197e77a541fSGraham Sider
2198e77a541fSGraham Sider ret = amdgpu_ttm_alloc_gart(&bo->tbo);
2199e77a541fSGraham Sider if (ret) {
2200e77a541fSGraham Sider pr_err("Failed to bind bo to GART. ret %d\n", ret);
2201e77a541fSGraham Sider goto err_map_bo_gart_failed;
2202e77a541fSGraham Sider }
2203e77a541fSGraham Sider
2204e77a541fSGraham Sider amdgpu_amdkfd_remove_eviction_fence(
22057f347e3fSEric Huang bo, bo->vm_bo->vm->process_info->eviction_fence);
2206e77a541fSGraham Sider
2207e77a541fSGraham Sider amdgpu_bo_unreserve(bo);
2208e77a541fSGraham Sider
2209fb910658SPhilip Yang *bo_gart = amdgpu_bo_ref(bo);
2210e77a541fSGraham Sider
2211e77a541fSGraham Sider return 0;
2212e77a541fSGraham Sider
2213e77a541fSGraham Sider err_map_bo_gart_failed:
2214e77a541fSGraham Sider amdgpu_bo_unpin(bo);
2215e77a541fSGraham Sider err_pin_bo_failed:
2216e77a541fSGraham Sider amdgpu_bo_unreserve(bo);
2217e77a541fSGraham Sider err_reserve_bo_failed:
2218e77a541fSGraham Sider
2219e77a541fSGraham Sider return ret;
2220e77a541fSGraham Sider }
2221e77a541fSGraham Sider
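/*
 * Illustrative usage sketch (hypothetical caller; the exact signature of
 * the release helper is an assumption): pin a GTT BO into GART and later
 * drop the reference via amdgpu_amdkfd_free_gtt_mem() as the comment above
 * describes.
 *
 *	struct amdgpu_bo *bo_gart;
 *	int r;
 *
 *	r = amdgpu_amdkfd_map_gtt_bo_to_gart(bo, &bo_gart);
 *	if (r)
 *		return r;
 *	... access the BO through its GART mapping ...
 *	amdgpu_amdkfd_free_gtt_mem(adev, (void **)&bo_gart);
 */
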
22224e2d1044SFelix Kuehling /** amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel() - Map a GTT BO for kernel CPU access
22234e2d1044SFelix Kuehling *
22244e2d1044SFelix Kuehling * @mem: Buffer object to be mapped for CPU access
22254e2d1044SFelix Kuehling * @kptr[out]: pointer in kernel CPU address space
22264e2d1044SFelix Kuehling * @size[out]: size of the buffer
22274e2d1044SFelix Kuehling *
22284e2d1044SFelix Kuehling * Pins the BO and maps it for kernel CPU access. The eviction fence is removed
22294e2d1044SFelix Kuehling * from the BO, since pinned BOs cannot be evicted. The bo must remain on the
22304e2d1044SFelix Kuehling * validate_list, so the GPU mapping can be restored after a page table was
22314e2d1044SFelix Kuehling * evicted.
22324e2d1044SFelix Kuehling *
22334e2d1044SFelix Kuehling * Return: 0 on success, error code on failure
22344e2d1044SFelix Kuehling */
22354e2d1044SFelix Kuehling int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
22364e2d1044SFelix Kuehling void **kptr, uint64_t *size)
2237a46a2cd1SFelix Kuehling {
2238a46a2cd1SFelix Kuehling int ret;
2239a46a2cd1SFelix Kuehling struct amdgpu_bo *bo = mem->bo;
2240a46a2cd1SFelix Kuehling
2241a46a2cd1SFelix Kuehling if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
2242a46a2cd1SFelix Kuehling pr_err("userptr can't be mapped to kernel\n");
2243a46a2cd1SFelix Kuehling return -EINVAL;
2244a46a2cd1SFelix Kuehling }
2245a46a2cd1SFelix Kuehling
2246a46a2cd1SFelix Kuehling mutex_lock(&mem->process_info->lock);
2247a46a2cd1SFelix Kuehling
2248a46a2cd1SFelix Kuehling ret = amdgpu_bo_reserve(bo, true);
2249a46a2cd1SFelix Kuehling if (ret) {
2250a46a2cd1SFelix Kuehling pr_err("Failed to reserve bo. ret %d\n", ret);
2251a46a2cd1SFelix Kuehling goto bo_reserve_failed;
2252a46a2cd1SFelix Kuehling }
2253a46a2cd1SFelix Kuehling
22547b7c6c81SJunwei Zhang ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
2255a46a2cd1SFelix Kuehling if (ret) {
2256a46a2cd1SFelix Kuehling pr_err("Failed to pin bo. ret %d\n", ret);
2257a46a2cd1SFelix Kuehling goto pin_failed;
2258a46a2cd1SFelix Kuehling }
2259a46a2cd1SFelix Kuehling
2260a46a2cd1SFelix Kuehling ret = amdgpu_bo_kmap(bo, kptr);
2261a46a2cd1SFelix Kuehling if (ret) {
2262a46a2cd1SFelix Kuehling pr_err("Failed to map bo to kernel. ret %d\n", ret);
2263a46a2cd1SFelix Kuehling goto kmap_failed;
2264a46a2cd1SFelix Kuehling }
2265a46a2cd1SFelix Kuehling
2266a46a2cd1SFelix Kuehling amdgpu_amdkfd_remove_eviction_fence(
22672d086fdeSFelix Kuehling bo, mem->process_info->eviction_fence);
2268a46a2cd1SFelix Kuehling
2269a46a2cd1SFelix Kuehling if (size)
2270a46a2cd1SFelix Kuehling *size = amdgpu_bo_size(bo);
2271a46a2cd1SFelix Kuehling
2272a46a2cd1SFelix Kuehling amdgpu_bo_unreserve(bo);
2273a46a2cd1SFelix Kuehling
2274a46a2cd1SFelix Kuehling mutex_unlock(&mem->process_info->lock);
2275a46a2cd1SFelix Kuehling return 0;
2276a46a2cd1SFelix Kuehling
2277a46a2cd1SFelix Kuehling kmap_failed:
2278a46a2cd1SFelix Kuehling amdgpu_bo_unpin(bo);
2279a46a2cd1SFelix Kuehling pin_failed:
2280a46a2cd1SFelix Kuehling amdgpu_bo_unreserve(bo);
2281a46a2cd1SFelix Kuehling bo_reserve_failed:
2282a46a2cd1SFelix Kuehling mutex_unlock(&mem->process_info->lock);
2283a46a2cd1SFelix Kuehling
2284a46a2cd1SFelix Kuehling return ret;
2285a46a2cd1SFelix Kuehling }
2286a46a2cd1SFelix Kuehling
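/*
 * Illustrative usage sketch (hypothetical caller): map for CPU access, use
 * the kernel pointer, and unmap during teardown with the function below.
 *
 *	void *kptr;
 *	uint64_t size;
 *	int r;
 *
 *	r = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(mem, &kptr, &size);
 *	if (r)
 *		return r;
 *	memset(kptr, 0, size);
 *	...
 *	amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
 */
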
22874e2d1044SFelix Kuehling /** amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel() - Unmap a GTT BO for kernel CPU access
22884e2d1044SFelix Kuehling *
22894e2d1044SFelix Kuehling * @mem: Buffer object to be unmapped for CPU access
22904e2d1044SFelix Kuehling *
22914e2d1044SFelix Kuehling * Removes the kernel CPU mapping and unpins the BO. It does not restore the
22924e2d1044SFelix Kuehling * eviction fence, so this function should only be used for cleanup before the
22934e2d1044SFelix Kuehling * BO is destroyed.
22944e2d1044SFelix Kuehling */
22954e2d1044SFelix Kuehling void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
229668df0f19SLang Yu {
229768df0f19SLang Yu struct amdgpu_bo *bo = mem->bo;
229868df0f19SLang Yu
2299357ef5b3SAndrew Martin (void)amdgpu_bo_reserve(bo, true);
230068df0f19SLang Yu amdgpu_bo_kunmap(bo);
230168df0f19SLang Yu amdgpu_bo_unpin(bo);
230268df0f19SLang Yu amdgpu_bo_unreserve(bo);
230368df0f19SLang Yu }
230468df0f19SLang Yu
2305dff63da9SGraham Sider int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
2306b97dfa27Sshaoyunl struct kfd_vm_fault_info *mem)
2307b97dfa27Sshaoyunl {
2308b97dfa27Sshaoyunl if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
2309b97dfa27Sshaoyunl *mem = *adev->gmc.vm_fault_info;
2310e7c94bfbSJingyu Wang mb(); /* make sure read happened */
2311b97dfa27Sshaoyunl atomic_set(&adev->gmc.vm_fault_info_updated, 0);
2312b97dfa27Sshaoyunl }
2313b97dfa27Sshaoyunl return 0;
2314b97dfa27Sshaoyunl }
2315b97dfa27Sshaoyunl
23160188006dSFelix Kuehling static int import_obj_create(struct amdgpu_device *adev,
23171dde0ea9SFelix Kuehling struct dma_buf *dma_buf,
23180188006dSFelix Kuehling struct drm_gem_object *obj,
2319b40a6ab2SFelix Kuehling uint64_t va, void *drm_priv,
23201dde0ea9SFelix Kuehling struct kgd_mem **mem, uint64_t *size,
23211dde0ea9SFelix Kuehling uint64_t *mmap_offset)
23221dde0ea9SFelix Kuehling {
2323b40a6ab2SFelix Kuehling struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
23241dde0ea9SFelix Kuehling struct amdgpu_bo *bo;
2325d4ec4bdcSFelix Kuehling int ret;
23261dde0ea9SFelix Kuehling
23271dde0ea9SFelix Kuehling bo = gem_to_amdgpu_bo(obj);
23281dde0ea9SFelix Kuehling if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
23290188006dSFelix Kuehling AMDGPU_GEM_DOMAIN_GTT)))
23301dde0ea9SFelix Kuehling /* Only VRAM and GTT BOs are supported */
23310188006dSFelix Kuehling return -EINVAL;
23321dde0ea9SFelix Kuehling
23331dde0ea9SFelix Kuehling *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
23340188006dSFelix Kuehling if (!*mem)
23350188006dSFelix Kuehling return -ENOMEM;
23361dde0ea9SFelix Kuehling
2337d4ec4bdcSFelix Kuehling ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
23382d81c4cdSFelix Kuehling if (ret)
23392d81c4cdSFelix Kuehling goto err_free_mem;
2340d4ec4bdcSFelix Kuehling
23411dde0ea9SFelix Kuehling if (size)
23421dde0ea9SFelix Kuehling *size = amdgpu_bo_size(bo);
23431dde0ea9SFelix Kuehling
23441dde0ea9SFelix Kuehling if (mmap_offset)
23451dde0ea9SFelix Kuehling *mmap_offset = amdgpu_bo_mmap_offset(bo);
23461dde0ea9SFelix Kuehling
2347c780b2eeSFelix Kuehling INIT_LIST_HEAD(&(*mem)->attachments);
23481dde0ea9SFelix Kuehling mutex_init(&(*mem)->lock);
23491d251d90SYong Zhao
2350d0ba51b1SFelix Kuehling (*mem)->alloc_flags =
2351d0ba51b1SFelix Kuehling ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
23521d251d90SYong Zhao KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
23531d251d90SYong Zhao | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
23541d251d90SYong Zhao | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
23551dde0ea9SFelix Kuehling
23562d81c4cdSFelix Kuehling get_dma_buf(dma_buf);
23572d81c4cdSFelix Kuehling (*mem)->dmabuf = dma_buf;
2358fd9a9f88SFelix Kuehling (*mem)->bo = bo;
23591dde0ea9SFelix Kuehling (*mem)->va = va;
236089773b85SLang Yu (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) &&
23618b0d068eSAlex Deucher !adev->apu_prefer_gtt ?
23621dde0ea9SFelix Kuehling AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
2363228ce176SRajneesh Bhardwaj
23641dde0ea9SFelix Kuehling (*mem)->mapped_to_gpu_memory = 0;
23651dde0ea9SFelix Kuehling (*mem)->process_info = avm->process_info;
23661dde0ea9SFelix Kuehling add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
23671dde0ea9SFelix Kuehling amdgpu_sync_create(&(*mem)->sync);
2368d4566deeSMukul Joshi (*mem)->is_imported = true;
23691dde0ea9SFelix Kuehling
23700e2e7c5bSFelix Kuehling mutex_lock(&avm->process_info->lock);
23710e2e7c5bSFelix Kuehling if (avm->process_info->eviction_fence &&
23720e2e7c5bSFelix Kuehling !dma_fence_is_signaled(&avm->process_info->eviction_fence->base))
23730e2e7c5bSFelix Kuehling ret = amdgpu_amdkfd_bo_validate_and_fence(bo, (*mem)->domain,
23740e2e7c5bSFelix Kuehling &avm->process_info->eviction_fence->base);
23750e2e7c5bSFelix Kuehling mutex_unlock(&avm->process_info->lock);
23760e2e7c5bSFelix Kuehling if (ret)
23770e2e7c5bSFelix Kuehling goto err_remove_mem;
23780e2e7c5bSFelix Kuehling
23791dde0ea9SFelix Kuehling return 0;
23802d81c4cdSFelix Kuehling
23810e2e7c5bSFelix Kuehling err_remove_mem:
23820e2e7c5bSFelix Kuehling remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
23830e2e7c5bSFelix Kuehling drm_vma_node_revoke(&obj->vma_node, drm_priv);
23842d81c4cdSFelix Kuehling err_free_mem:
23852d81c4cdSFelix Kuehling kfree(*mem);
23860188006dSFelix Kuehling return ret;
23870188006dSFelix Kuehling }
23880188006dSFelix Kuehling
23890188006dSFelix Kuehling int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd,
23900188006dSFelix Kuehling uint64_t va, void *drm_priv,
23910188006dSFelix Kuehling struct kgd_mem **mem, uint64_t *size,
23920188006dSFelix Kuehling uint64_t *mmap_offset)
23930188006dSFelix Kuehling {
23940188006dSFelix Kuehling struct drm_gem_object *obj;
23950188006dSFelix Kuehling uint32_t handle;
23960188006dSFelix Kuehling int ret;
23970188006dSFelix Kuehling
23980188006dSFelix Kuehling ret = drm_gem_prime_fd_to_handle(&adev->ddev, adev->kfd.client.file, fd,
23990188006dSFelix Kuehling &handle);
24000188006dSFelix Kuehling if (ret)
24010188006dSFelix Kuehling return ret;
24020188006dSFelix Kuehling obj = drm_gem_object_lookup(adev->kfd.client.file, handle);
24030188006dSFelix Kuehling if (!obj) {
24040188006dSFelix Kuehling ret = -EINVAL;
24050188006dSFelix Kuehling goto err_release_handle;
24060188006dSFelix Kuehling }
24070188006dSFelix Kuehling
24080188006dSFelix Kuehling ret = import_obj_create(adev, obj->dma_buf, obj, va, drm_priv, mem, size,
24090188006dSFelix Kuehling mmap_offset);
24100188006dSFelix Kuehling if (ret)
24110188006dSFelix Kuehling goto err_put_obj;
24120188006dSFelix Kuehling
24130188006dSFelix Kuehling (*mem)->gem_handle = handle;
24140188006dSFelix Kuehling
24150188006dSFelix Kuehling return 0;
24160188006dSFelix Kuehling
24172d81c4cdSFelix Kuehling err_put_obj:
24182d81c4cdSFelix Kuehling drm_gem_object_put(obj);
24190188006dSFelix Kuehling err_release_handle:
24200188006dSFelix Kuehling drm_gem_handle_delete(adev->kfd.client.file, handle);
24212d81c4cdSFelix Kuehling return ret;
24221dde0ea9SFelix Kuehling }
24231dde0ea9SFelix Kuehling
2424fd234e75SFelix Kuehling int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
2425fd234e75SFelix Kuehling struct dma_buf **dma_buf)
2426fd234e75SFelix Kuehling {
2427fd234e75SFelix Kuehling int ret;
2428fd234e75SFelix Kuehling
2429fd234e75SFelix Kuehling mutex_lock(&mem->lock);
2430fd234e75SFelix Kuehling ret = kfd_mem_export_dmabuf(mem);
2431fd234e75SFelix Kuehling if (ret)
2432fd234e75SFelix Kuehling goto out;
2433fd234e75SFelix Kuehling
2434fd234e75SFelix Kuehling get_dma_buf(mem->dmabuf);
2435fd234e75SFelix Kuehling *dma_buf = mem->dmabuf;
2436fd234e75SFelix Kuehling out:
2437fd234e75SFelix Kuehling mutex_unlock(&mem->lock);
2438fd234e75SFelix Kuehling return ret;
2439fd234e75SFelix Kuehling }
2440fd234e75SFelix Kuehling
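/*
 * Illustrative round-trip sketch (the dma_buf_fd() glue and the
 * 'peer_adev'/'peer_drm_priv' names are hypothetical): export a kgd_mem as
 * a dma-buf, turn it into a file descriptor, and import it into another
 * device's VM with the import function above.
 *
 *	struct dma_buf *dmabuf;
 *	struct kgd_mem *imported;
 *	uint64_t size, mmap_offset;
 *	int fd, r;
 *
 *	r = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf);
 *	if (r)
 *		return r;
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *	r = amdgpu_amdkfd_gpuvm_import_dmabuf_fd(peer_adev, fd, va,
 *						 peer_drm_priv, &imported,
 *						 &size, &mmap_offset);
 */
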
24415ae0283eSFelix Kuehling /* Evict a userptr BO by stopping the queues if necessary
24425ae0283eSFelix Kuehling *
24435ae0283eSFelix Kuehling * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
24445ae0283eSFelix Kuehling * cannot do any memory allocations, and cannot take any locks that
2445f95f51a4SFelix Kuehling * are held elsewhere while allocating memory.
24465ae0283eSFelix Kuehling *
24475ae0283eSFelix Kuehling * It doesn't do anything to the BO itself. The real work happens in
24485ae0283eSFelix Kuehling * restore, where we get updated page addresses. This function only
24495ae0283eSFelix Kuehling * ensures that GPU access to the BO is stopped.
24505ae0283eSFelix Kuehling */
2451f95f51a4SFelix Kuehling int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
2452f95f51a4SFelix Kuehling unsigned long cur_seq, struct kgd_mem *mem)
2453e52482deSFelix Kuehling {
24545ae0283eSFelix Kuehling struct amdkfd_process_info *process_info = mem->process_info;
24555ae0283eSFelix Kuehling int r = 0;
24565ae0283eSFelix Kuehling
2457f95f51a4SFelix Kuehling /* Do not process MMU notifications during CRIU restore until
2458f95f51a4SFelix Kuehling * KFD_CRIU_OP_RESUME IOCTL is received
2459f95f51a4SFelix Kuehling */
2460011bbb03SRajneesh Bhardwaj if (READ_ONCE(process_info->block_mmu_notifications))
2461011bbb03SRajneesh Bhardwaj return 0;
2462011bbb03SRajneesh Bhardwaj
2463f95f51a4SFelix Kuehling mutex_lock(&process_info->notifier_lock);
2464f95f51a4SFelix Kuehling mmu_interval_set_seq(mni, cur_seq);
2465f95f51a4SFelix Kuehling
2466f95f51a4SFelix Kuehling mem->invalid++;
2467f95f51a4SFelix Kuehling if (++process_info->evicted_bos == 1) {
24685ae0283eSFelix Kuehling /* First eviction, stop the queues */
2469f95f51a4SFelix Kuehling r = kgd2kfd_quiesce_mm(mni->mm,
2470f95f51a4SFelix Kuehling KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
247110112bf8SXiaogang Chen
247210112bf8SXiaogang Chen if (r && r != -ESRCH)
24735ae0283eSFelix Kuehling pr_err("Failed to quiesce KFD\n");
247410112bf8SXiaogang Chen
247510112bf8SXiaogang Chen if (r != -ESRCH)
24769a1c1339SFelix Kuehling queue_delayed_work(system_freezable_wq,
24779a1c1339SFelix Kuehling &process_info->restore_userptr_work,
24785ae0283eSFelix Kuehling msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
24795ae0283eSFelix Kuehling }
2480f95f51a4SFelix Kuehling mutex_unlock(&process_info->notifier_lock);
24815ae0283eSFelix Kuehling
24825ae0283eSFelix Kuehling return r;
24835ae0283eSFelix Kuehling }
24845ae0283eSFelix Kuehling
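/*
 * Illustrative wiring sketch (assumption: the real hook lives in the
 * amdgpu_hmm code; 'example_invalidate' and 'mni_to_kgd_mem' are
 * hypothetical names): this function is meant to be called from an
 * mmu_interval_notifier_ops ->invalidate callback, roughly like this.
 *
 *	static bool example_invalidate(struct mmu_interval_notifier *mni,
 *				       const struct mmu_notifier_range *range,
 *				       unsigned long cur_seq)
 *	{
 *		struct kgd_mem *mem = mni_to_kgd_mem(mni);
 *
 *		amdgpu_amdkfd_evict_userptr(mni, cur_seq, mem);
 *		return true;
 *	}
 */
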
24855ae0283eSFelix Kuehling /* Update invalid userptr BOs
24865ae0283eSFelix Kuehling *
24875ae0283eSFelix Kuehling * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
24885ae0283eSFelix Kuehling * userptr_inval_list and updates user pages for all BOs that have
24895ae0283eSFelix Kuehling * been invalidated since their last update.
24905ae0283eSFelix Kuehling */
24915ae0283eSFelix Kuehling static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
24925ae0283eSFelix Kuehling struct mm_struct *mm)
24935ae0283eSFelix Kuehling {
24945ae0283eSFelix Kuehling struct kgd_mem *mem, *tmp_mem;
24955ae0283eSFelix Kuehling struct amdgpu_bo *bo;
24965ae0283eSFelix Kuehling struct ttm_operation_ctx ctx = { false, false };
2497f95f51a4SFelix Kuehling uint32_t invalid;
2498f95f51a4SFelix Kuehling int ret = 0;
24995ae0283eSFelix Kuehling
2500f95f51a4SFelix Kuehling mutex_lock(&process_info->notifier_lock);
2501f95f51a4SFelix Kuehling
2502f95f51a4SFelix Kuehling /* Move all invalidated BOs to the userptr_inval_list */
25035ae0283eSFelix Kuehling list_for_each_entry_safe(mem, tmp_mem,
25045ae0283eSFelix Kuehling &process_info->userptr_valid_list,
25058abc1eb2SChristian König validate_list)
2506f95f51a4SFelix Kuehling if (mem->invalid)
25078abc1eb2SChristian König list_move_tail(&mem->validate_list,
2508f95f51a4SFelix Kuehling &process_info->userptr_inval_list);
2509f95f51a4SFelix Kuehling
2510f95f51a4SFelix Kuehling /* Go through userptr_inval_list and update any invalid user_pages */
2511f95f51a4SFelix Kuehling list_for_each_entry(mem, &process_info->userptr_inval_list,
25128abc1eb2SChristian König validate_list) {
2513f95f51a4SFelix Kuehling invalid = mem->invalid;
2514f95f51a4SFelix Kuehling if (!invalid)
2515f95f51a4SFelix Kuehling /* BO hasn't been invalidated since the last
2516f95f51a4SFelix Kuehling * revalidation attempt. Keep its page list.
2517f95f51a4SFelix Kuehling */
2518f95f51a4SFelix Kuehling continue;
25195ae0283eSFelix Kuehling
25205ae0283eSFelix Kuehling bo = mem->bo;
25215ae0283eSFelix Kuehling
2522f95f51a4SFelix Kuehling amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range);
2523f95f51a4SFelix Kuehling mem->range = NULL;
2524f95f51a4SFelix Kuehling
2525f95f51a4SFelix Kuehling /* BO reservations and getting user pages (hmm_range_fault)
2526f95f51a4SFelix Kuehling * must happen outside the notifier lock
2527f95f51a4SFelix Kuehling */
2528f95f51a4SFelix Kuehling mutex_unlock(&process_info->notifier_lock);
2529f95f51a4SFelix Kuehling
2530f95f51a4SFelix Kuehling /* Move the BO to system (CPU) domain if necessary to unmap
2531f95f51a4SFelix Kuehling * and free the SG table
2532f95f51a4SFelix Kuehling */
2533f95f51a4SFelix Kuehling if (bo->tbo.resource->mem_type != TTM_PL_SYSTEM) {
25345ae0283eSFelix Kuehling if (amdgpu_bo_reserve(bo, true))
25355ae0283eSFelix Kuehling return -EAGAIN;
2536c704ab18SChristian König amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
25375ae0283eSFelix Kuehling ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
25385ae0283eSFelix Kuehling amdgpu_bo_unreserve(bo);
25395ae0283eSFelix Kuehling if (ret) {
25405ae0283eSFelix Kuehling pr_err("%s: Failed to invalidate userptr BO\n",
25415ae0283eSFelix Kuehling __func__);
25425ae0283eSFelix Kuehling return -EAGAIN;
25435ae0283eSFelix Kuehling }
25445ae0283eSFelix Kuehling }
25455ae0283eSFelix Kuehling
25465ae0283eSFelix Kuehling /* Get updated user pages */
2547fec8fdb5SChristian König ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
2548f95f51a4SFelix Kuehling &mem->range);
25495ae0283eSFelix Kuehling if (ret) {
25503b8a23aeSPhilip Yang pr_debug("Failed %d to get user pages\n", ret);
2551e82fdb16SPhilip Yang
25523b8a23aeSPhilip Yang /* Treat the -EFAULT bad address error as success. The
25533b8a23aeSPhilip Yang * mapping will fail later with a VM fault if the GPU
25543b8a23aeSPhilip Yang * tries to access it. Better than hanging indefinitely
25553b8a23aeSPhilip Yang * with stalled user mode queues.
25563b8a23aeSPhilip Yang *
25573b8a23aeSPhilip Yang * Return other errors (-EBUSY or -ENOMEM) to retry restore.
25583b8a23aeSPhilip Yang */
25593b8a23aeSPhilip Yang if (ret != -EFAULT)
2560e82fdb16SPhilip Yang return ret;
2561e82fdb16SPhilip Yang
2562f95f51a4SFelix Kuehling ret = 0;
25633b8a23aeSPhilip Yang }
2564f4fd28b6SFelix Kuehling
2565f95f51a4SFelix Kuehling mutex_lock(&process_info->notifier_lock);
2566f95f51a4SFelix Kuehling
2567f4fd28b6SFelix Kuehling /* Mark the BO as valid unless it was invalidated
2568f4fd28b6SFelix Kuehling * again concurrently.
2569f4fd28b6SFelix Kuehling */
2570f95f51a4SFelix Kuehling if (mem->invalid != invalid) {
2571f95f51a4SFelix Kuehling ret = -EAGAIN;
2572f95f51a4SFelix Kuehling goto unlock_out;
2573f95f51a4SFelix Kuehling }
25743af470cbSXiaogang Chen /* Set mem valid only if it has an hmm range associated */
25753af470cbSXiaogang Chen if (mem->range)
2576f95f51a4SFelix Kuehling mem->invalid = 0;
25775ae0283eSFelix Kuehling }
25785ae0283eSFelix Kuehling
2579f95f51a4SFelix Kuehling unlock_out:
2580f95f51a4SFelix Kuehling mutex_unlock(&process_info->notifier_lock);
2581f95f51a4SFelix Kuehling
2582f95f51a4SFelix Kuehling return ret;
2583e52482deSFelix Kuehling }
2584e52482deSFelix Kuehling
25855ae0283eSFelix Kuehling /* Validate invalid userptr BOs
25865ae0283eSFelix Kuehling *
2587f95f51a4SFelix Kuehling * Validates BOs on the userptr_inval_list. Also updates GPUVM page tables
2588f95f51a4SFelix Kuehling * with new page addresses and waits for the page table updates to complete.
25895ae0283eSFelix Kuehling */
25905ae0283eSFelix Kuehling static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
25915ae0283eSFelix Kuehling {
25928abc1eb2SChristian König struct ttm_operation_ctx ctx = { false, false };
25935ae0283eSFelix Kuehling struct amdgpu_sync sync;
25948abc1eb2SChristian König struct drm_exec exec;
25955ae0283eSFelix Kuehling
25965ae0283eSFelix Kuehling struct amdgpu_vm *peer_vm;
25975ae0283eSFelix Kuehling struct kgd_mem *mem, *tmp_mem;
25985ae0283eSFelix Kuehling struct amdgpu_bo *bo;
25998abc1eb2SChristian König int ret;
26005ae0283eSFelix Kuehling
26015ae0283eSFelix Kuehling amdgpu_sync_create(&sync);
26025ae0283eSFelix Kuehling
260305d24935SRob Clark drm_exec_init(&exec, 0, 0);
26048abc1eb2SChristian König /* Reserve all BOs and page tables for validation */
26058abc1eb2SChristian König drm_exec_until_all_locked(&exec) {
26068abc1eb2SChristian König /* Reserve all the page directories */
26078abc1eb2SChristian König list_for_each_entry(peer_vm, &process_info->vm_list_head,
26088abc1eb2SChristian König vm_list_node) {
26098abc1eb2SChristian König ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
26108abc1eb2SChristian König drm_exec_retry_on_contention(&exec);
26118abc1eb2SChristian König if (unlikely(ret))
26128abc1eb2SChristian König goto unreserve_out;
26138abc1eb2SChristian König }
26148abc1eb2SChristian König
26158abc1eb2SChristian König /* Reserve the userptr_inval_list entries to resv_list */
26168abc1eb2SChristian König list_for_each_entry(mem, &process_info->userptr_inval_list,
26178abc1eb2SChristian König validate_list) {
26188abc1eb2SChristian König struct drm_gem_object *gobj;
26198abc1eb2SChristian König
26208abc1eb2SChristian König gobj = &mem->bo->tbo.base;
26218abc1eb2SChristian König ret = drm_exec_prepare_obj(&exec, gobj, 1);
26228abc1eb2SChristian König drm_exec_retry_on_contention(&exec);
26238abc1eb2SChristian König if (unlikely(ret))
26248abc1eb2SChristian König goto unreserve_out;
26258abc1eb2SChristian König }
26268abc1eb2SChristian König }
26278abc1eb2SChristian König
262850661eb1SFelix Kuehling ret = process_validate_vms(process_info, NULL);
26295ae0283eSFelix Kuehling if (ret)
26305ae0283eSFelix Kuehling goto unreserve_out;
26315ae0283eSFelix Kuehling
26325ae0283eSFelix Kuehling /* Validate BOs and update GPUVM page tables */
26335ae0283eSFelix Kuehling list_for_each_entry_safe(mem, tmp_mem,
26345ae0283eSFelix Kuehling &process_info->userptr_inval_list,
26358abc1eb2SChristian König validate_list) {
2636c780b2eeSFelix Kuehling struct kfd_mem_attachment *attachment;
26375ae0283eSFelix Kuehling
26385ae0283eSFelix Kuehling bo = mem->bo;
26395ae0283eSFelix Kuehling
2640899fbde1SPhilip Yang /* Validate the BO if we got user pages */
2641899fbde1SPhilip Yang if (bo->tbo.ttm->pages[0]) {
2642c704ab18SChristian König amdgpu_bo_placement_from_domain(bo, mem->domain);
26435ae0283eSFelix Kuehling ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
26445ae0283eSFelix Kuehling if (ret) {
26455ae0283eSFelix Kuehling pr_err("%s: failed to validate BO\n", __func__);
26465ae0283eSFelix Kuehling goto unreserve_out;
26475ae0283eSFelix Kuehling }
26485ae0283eSFelix Kuehling }
26495ae0283eSFelix Kuehling
26505ae0283eSFelix Kuehling /* Update mapping. If the BO was not validated
26515ae0283eSFelix Kuehling * (because we couldn't get user pages), this will
26525ae0283eSFelix Kuehling * clear the page table entries, which will result in
26535ae0283eSFelix Kuehling * VM faults if the GPU tries to access the invalid
26545ae0283eSFelix Kuehling * memory.
26555ae0283eSFelix Kuehling */
2656c780b2eeSFelix Kuehling list_for_each_entry(attachment, &mem->attachments, list) {
2657c780b2eeSFelix Kuehling if (!attachment->is_mapped)
26585ae0283eSFelix Kuehling continue;
26595ae0283eSFelix Kuehling
2660b72ed8a2SFelix Kuehling kfd_mem_dmaunmap_attachment(mem, attachment);
26614d30a83cSChristian König ret = update_gpuvm_pte(mem, attachment, &sync);
26625ae0283eSFelix Kuehling if (ret) {
26635ae0283eSFelix Kuehling pr_err("%s: update PTE failed\n", __func__);
26645ae0283eSFelix Kuehling /* make sure this gets validated again */
2665f95f51a4SFelix Kuehling mutex_lock(&process_info->notifier_lock);
2666f95f51a4SFelix Kuehling mem->invalid++;
2667f95f51a4SFelix Kuehling mutex_unlock(&process_info->notifier_lock);
26685ae0283eSFelix Kuehling goto unreserve_out;
26695ae0283eSFelix Kuehling }
26705ae0283eSFelix Kuehling }
26715ae0283eSFelix Kuehling }
26725ae0283eSFelix Kuehling
26735ae0283eSFelix Kuehling /* Update page directories */
26745ae0283eSFelix Kuehling ret = process_update_pds(process_info, &sync);
26755ae0283eSFelix Kuehling
26765ae0283eSFelix Kuehling unreserve_out:
26778abc1eb2SChristian König drm_exec_fini(&exec);
26785ae0283eSFelix Kuehling amdgpu_sync_wait(&sync, false);
26795ae0283eSFelix Kuehling amdgpu_sync_free(&sync);
26805ae0283eSFelix Kuehling
26815ae0283eSFelix Kuehling return ret;
26825ae0283eSFelix Kuehling }
26835ae0283eSFelix Kuehling
2684f95f51a4SFelix Kuehling /* Confirm that all user pages are valid while holding the notifier lock
2685f95f51a4SFelix Kuehling *
2686f95f51a4SFelix Kuehling * Moves valid BOs from the userptr_inval_list back to the userptr_valid_list.
2687f95f51a4SFelix Kuehling */
2688f95f51a4SFelix Kuehling static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_info)
2689f95f51a4SFelix Kuehling {
2690f95f51a4SFelix Kuehling struct kgd_mem *mem, *tmp_mem;
2691f95f51a4SFelix Kuehling int ret = 0;
2692f95f51a4SFelix Kuehling
2693f95f51a4SFelix Kuehling list_for_each_entry_safe(mem, tmp_mem,
2694f95f51a4SFelix Kuehling &process_info->userptr_inval_list,
26958abc1eb2SChristian König validate_list) {
26963af470cbSXiaogang Chen bool valid;
26973af470cbSXiaogang Chen
26983af470cbSXiaogang Chen /* Keep mem without an hmm range on the userptr_inval_list */
26993af470cbSXiaogang Chen if (!mem->range)
27003af470cbSXiaogang Chen continue;
27013af470cbSXiaogang Chen
27023af470cbSXiaogang Chen /* Only check mem with hmm range associated */
27033af470cbSXiaogang Chen valid = amdgpu_ttm_tt_get_user_pages_done(
2704f95f51a4SFelix Kuehling mem->bo->tbo.ttm, mem->range);
2705f95f51a4SFelix Kuehling
2706f95f51a4SFelix Kuehling mem->range = NULL;
2707f95f51a4SFelix Kuehling if (!valid) {
2708f95f51a4SFelix Kuehling WARN(!mem->invalid, "Invalid BO not marked invalid");
2709f95f51a4SFelix Kuehling ret = -EAGAIN;
2710f95f51a4SFelix Kuehling continue;
2711f95f51a4SFelix Kuehling }
27123af470cbSXiaogang Chen
27133af470cbSXiaogang Chen if (mem->invalid) {
27143af470cbSXiaogang Chen WARN(1, "Valid BO is marked invalid");
27153af470cbSXiaogang Chen ret = -EAGAIN;
27163af470cbSXiaogang Chen continue;
27173af470cbSXiaogang Chen }
2718f95f51a4SFelix Kuehling
27198abc1eb2SChristian König list_move_tail(&mem->validate_list,
2720f95f51a4SFelix Kuehling &process_info->userptr_valid_list);
2721f95f51a4SFelix Kuehling }
2722f95f51a4SFelix Kuehling
2723f95f51a4SFelix Kuehling return ret;
2724f95f51a4SFelix Kuehling }
2725f95f51a4SFelix Kuehling
27265ae0283eSFelix Kuehling /* Worker callback to restore evicted userptr BOs
27275ae0283eSFelix Kuehling *
27285ae0283eSFelix Kuehling * Tries to update and validate all userptr BOs. If successful and no
27295ae0283eSFelix Kuehling * concurrent evictions happened, the queues are restarted. Otherwise,
27305ae0283eSFelix Kuehling * reschedule for another attempt later.
27315ae0283eSFelix Kuehling */
27325ae0283eSFelix Kuehling static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
27335ae0283eSFelix Kuehling {
27345ae0283eSFelix Kuehling struct delayed_work *dwork = to_delayed_work(work);
27355ae0283eSFelix Kuehling struct amdkfd_process_info *process_info =
27365ae0283eSFelix Kuehling container_of(dwork, struct amdkfd_process_info,
27375ae0283eSFelix Kuehling restore_userptr_work);
27385ae0283eSFelix Kuehling struct task_struct *usertask;
27395ae0283eSFelix Kuehling struct mm_struct *mm;
2740f95f51a4SFelix Kuehling uint32_t evicted_bos;
27415ae0283eSFelix Kuehling
2742f95f51a4SFelix Kuehling mutex_lock(&process_info->notifier_lock);
2743f95f51a4SFelix Kuehling evicted_bos = process_info->evicted_bos;
2744f95f51a4SFelix Kuehling mutex_unlock(&process_info->notifier_lock);
27455ae0283eSFelix Kuehling if (!evicted_bos)
27465ae0283eSFelix Kuehling return;
27475ae0283eSFelix Kuehling
27485ae0283eSFelix Kuehling /* Reference task and mm in case of concurrent process termination */
27495ae0283eSFelix Kuehling usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
27505ae0283eSFelix Kuehling if (!usertask)
27515ae0283eSFelix Kuehling return;
27525ae0283eSFelix Kuehling mm = get_task_mm(usertask);
27535ae0283eSFelix Kuehling if (!mm) {
27545ae0283eSFelix Kuehling put_task_struct(usertask);
27555ae0283eSFelix Kuehling return;
27565ae0283eSFelix Kuehling }
27575ae0283eSFelix Kuehling
27585ae0283eSFelix Kuehling mutex_lock(&process_info->lock);
27595ae0283eSFelix Kuehling
27605ae0283eSFelix Kuehling if (update_invalid_user_pages(process_info, mm))
27615ae0283eSFelix Kuehling goto unlock_out;
27625ae0283eSFelix Kuehling /* userptr_inval_list can be empty if all evicted userptr BOs
27635ae0283eSFelix Kuehling * have been freed. In that case there is nothing to validate
27645ae0283eSFelix Kuehling * and we can just restart the queues.
27655ae0283eSFelix Kuehling */
27665ae0283eSFelix Kuehling if (!list_empty(&process_info->userptr_inval_list)) {
27675ae0283eSFelix Kuehling if (validate_invalid_user_pages(process_info))
27685ae0283eSFelix Kuehling goto unlock_out;
27695ae0283eSFelix Kuehling }
27705ae0283eSFelix Kuehling /* Final check for concurrent evicton and atomic update. If
27715ae0283eSFelix Kuehling * another eviction happens after successful update, it will
27725ae0283eSFelix Kuehling * be a first eviction that calls quiesce_mm. The eviction
27735ae0283eSFelix Kuehling * reference counting inside KFD will handle this case.
27745ae0283eSFelix Kuehling */
2775f95f51a4SFelix Kuehling mutex_lock(&process_info->notifier_lock);
2776f95f51a4SFelix Kuehling if (process_info->evicted_bos != evicted_bos)
2777f95f51a4SFelix Kuehling goto unlock_notifier_out;
2778f95f51a4SFelix Kuehling
2779f95f51a4SFelix Kuehling if (confirm_valid_user_pages_locked(process_info)) {
2780f95f51a4SFelix Kuehling WARN(1, "User pages unexpectedly invalid");
2781f95f51a4SFelix Kuehling goto unlock_notifier_out;
2782f95f51a4SFelix Kuehling }
2783f95f51a4SFelix Kuehling
2784f95f51a4SFelix Kuehling process_info->evicted_bos = evicted_bos = 0;
2785f95f51a4SFelix Kuehling
27868e07e267SAmber Lin if (kgd2kfd_resume_mm(mm)) {
27875ae0283eSFelix Kuehling pr_err("%s: Failed to resume KFD\n", __func__);
27885ae0283eSFelix Kuehling /* No recovery from this failure. Probably the CP is
27895ae0283eSFelix Kuehling * hanging. No point trying again.
27905ae0283eSFelix Kuehling */
27915ae0283eSFelix Kuehling }
27926c55d6e9SPhilip Yang
2793f95f51a4SFelix Kuehling unlock_notifier_out:
2794f95f51a4SFelix Kuehling mutex_unlock(&process_info->notifier_lock);
27955ae0283eSFelix Kuehling unlock_out:
27965ae0283eSFelix Kuehling mutex_unlock(&process_info->lock);
27975ae0283eSFelix Kuehling
27985ae0283eSFelix Kuehling /* If validation failed, reschedule another attempt */
2799c7f21978SPhilip Yang if (evicted_bos) {
28009a1c1339SFelix Kuehling queue_delayed_work(system_freezable_wq,
28019a1c1339SFelix Kuehling &process_info->restore_userptr_work,
28025ae0283eSFelix Kuehling msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2803c7f21978SPhilip Yang
2804c7f21978SPhilip Yang kfd_smi_event_queue_restore_rescheduled(mm);
2805c7f21978SPhilip Yang }
2806c7f21978SPhilip Yang mmput(mm);
2807c7f21978SPhilip Yang put_task_struct(usertask);
28085ae0283eSFelix Kuehling }
28095ae0283eSFelix Kuehling
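/*
 * Summary of the userptr eviction/restore cycle implemented above (a
 * restatement for readers, not new behavior):
 *
 *	MMU notifier -> amdgpu_amdkfd_evict_userptr():
 *	    mem->invalid++, evicted_bos++; the first eviction quiesces the
 *	    queues and schedules restore_userptr_work after
 *	    AMDGPU_USERPTR_RESTORE_DELAY_MS.
 *	restore worker -> update_invalid_user_pages(),
 *	    validate_invalid_user_pages(), confirm_valid_user_pages_locked();
 *	    if evicted_bos is unchanged, reset it to 0 and resume the queues,
 *	    otherwise requeue itself with the same delay.
 */
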
2810c147ddc6SFelix Kuehling static void replace_eviction_fence(struct dma_fence __rcu **ef,
28119a1c1339SFelix Kuehling struct dma_fence *new_ef)
28129a1c1339SFelix Kuehling {
28139a1c1339SFelix Kuehling struct dma_fence *old_ef = rcu_replace_pointer(*ef, new_ef, true
28149a1c1339SFelix Kuehling /* protected by process_info->lock */);
28159a1c1339SFelix Kuehling
28169a1c1339SFelix Kuehling /* If we're replacing an unsignaled eviction fence, that fence will
28179a1c1339SFelix Kuehling * never be signaled, and if anyone is still waiting on that fence,
28189a1c1339SFelix Kuehling * they will hang forever. This should never happen. We should only
28199a1c1339SFelix Kuehling * replace the fence in restore_work that only gets scheduled after
28209a1c1339SFelix Kuehling * eviction work signaled the fence.
28219a1c1339SFelix Kuehling */
28229a1c1339SFelix Kuehling WARN_ONCE(!dma_fence_is_signaled(old_ef),
28239a1c1339SFelix Kuehling "Replacing unsignaled eviction fence");
28249a1c1339SFelix Kuehling dma_fence_put(old_ef);
28259a1c1339SFelix Kuehling }
28269a1c1339SFelix Kuehling
2827a46a2cd1SFelix Kuehling /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2828a46a2cd1SFelix Kuehling * KFD process identified by process_info
2829a46a2cd1SFelix Kuehling *
2830a46a2cd1SFelix Kuehling * @process_info: amdkfd_process_info of the KFD process
2831a46a2cd1SFelix Kuehling *
2832a46a2cd1SFelix Kuehling * After memory eviction, restore thread calls this function. The function
2833a46a2cd1SFelix Kuehling * should be called when the Process is still valid. BO restore involves -
2834a46a2cd1SFelix Kuehling *
2835a46a2cd1SFelix Kuehling * 1. Release old eviction fence and create new one
2836a46a2cd1SFelix Kuehling * 2. Get two copies of PD BO list from all the VMs. Keep one copy as pd_list.
2837a46a2cd1SFelix Kuehling * 3. Use the second PD list and kfd_bo_list to create a list (ctx.list) of
2838a46a2cd1SFelix Kuehling * BOs that need to be reserved.
2839a46a2cd1SFelix Kuehling * 4. Reserve all the BOs
2840a46a2cd1SFelix Kuehling * 5. Validate PD and PT BOs.
2841a46a2cd1SFelix Kuehling * 6. Validate all KFD BOs using kfd_bo_list and Map them and add new fence
2842a46a2cd1SFelix Kuehling * 7. Add fence to all PD and PT BOs.
2843a46a2cd1SFelix Kuehling * 8. Unreserve all BOs
2844a46a2cd1SFelix Kuehling */
2845c147ddc6SFelix Kuehling int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu **ef)
2846a46a2cd1SFelix Kuehling {
2847a46a2cd1SFelix Kuehling struct amdkfd_process_info *process_info = info;
28485b21d3e5SFelix Kuehling struct amdgpu_vm *peer_vm;
2849a46a2cd1SFelix Kuehling struct kgd_mem *mem;
2850a46a2cd1SFelix Kuehling struct list_head duplicate_save;
2851a46a2cd1SFelix Kuehling struct amdgpu_sync sync_obj;
285215024dafSPhilip Yang unsigned long failed_size = 0;
285315024dafSPhilip Yang unsigned long total_size = 0;
28548abc1eb2SChristian König struct drm_exec exec;
28558abc1eb2SChristian König int ret;
2856a46a2cd1SFelix Kuehling
2857a46a2cd1SFelix Kuehling INIT_LIST_HEAD(&duplicate_save);
2858a46a2cd1SFelix Kuehling
2859a46a2cd1SFelix Kuehling mutex_lock(&process_info->lock);
28608abc1eb2SChristian König
286171b9d192SMukul Joshi drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
28628abc1eb2SChristian König drm_exec_until_all_locked(&exec) {
2863a46a2cd1SFelix Kuehling list_for_each_entry(peer_vm, &process_info->vm_list_head,
28648abc1eb2SChristian König vm_list_node) {
28658abc1eb2SChristian König ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
28668abc1eb2SChristian König drm_exec_retry_on_contention(&exec);
286771b9d192SMukul Joshi if (unlikely(ret)) {
286871b9d192SMukul Joshi pr_err("Locking VM PD failed, ret: %d\n", ret);
28698abc1eb2SChristian König goto ttm_reserve_fail;
28708abc1eb2SChristian König }
287171b9d192SMukul Joshi }
2872a46a2cd1SFelix Kuehling
2873a46a2cd1SFelix Kuehling /* Reserve all BOs and page tables/directory. Add all BOs from
2874a46a2cd1SFelix Kuehling * kfd_bo_list to ctx.list
2875a46a2cd1SFelix Kuehling */
2876a46a2cd1SFelix Kuehling list_for_each_entry(mem, &process_info->kfd_bo_list,
28778abc1eb2SChristian König validate_list) {
28788abc1eb2SChristian König struct drm_gem_object *gobj;
2879a46a2cd1SFelix Kuehling
28808abc1eb2SChristian König gobj = &mem->bo->tbo.base;
28818abc1eb2SChristian König ret = drm_exec_prepare_obj(&exec, gobj, 1);
28828abc1eb2SChristian König drm_exec_retry_on_contention(&exec);
288371b9d192SMukul Joshi if (unlikely(ret)) {
288471b9d192SMukul Joshi pr_err("drm_exec_prepare_obj failed, ret: %d\n", ret);
2885a46a2cd1SFelix Kuehling goto ttm_reserve_fail;
2886a46a2cd1SFelix Kuehling }
28878abc1eb2SChristian König }
288871b9d192SMukul Joshi }
2889a46a2cd1SFelix Kuehling
2890a46a2cd1SFelix Kuehling amdgpu_sync_create(&sync_obj);
2891a46a2cd1SFelix Kuehling
289281bf1451SLang Yu /* Validate BOs managed by KFD */
2893a46a2cd1SFelix Kuehling list_for_each_entry(mem, &process_info->kfd_bo_list,
28948abc1eb2SChristian König validate_list) {
2895a46a2cd1SFelix Kuehling
2896a46a2cd1SFelix Kuehling struct amdgpu_bo *bo = mem->bo;
2897a46a2cd1SFelix Kuehling uint32_t domain = mem->domain;
28988bb31587SChristian König struct dma_resv_iter cursor;
28998bb31587SChristian König struct dma_fence *fence;
2900a46a2cd1SFelix Kuehling
290115024dafSPhilip Yang total_size += amdgpu_bo_size(bo);
290215024dafSPhilip Yang
2903a46a2cd1SFelix Kuehling ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2904a46a2cd1SFelix Kuehling if (ret) {
290515024dafSPhilip Yang pr_debug("Memory eviction: Validate BOs failed\n");
290615024dafSPhilip Yang failed_size += amdgpu_bo_size(bo);
290715024dafSPhilip Yang ret = amdgpu_amdkfd_bo_validate(bo,
290815024dafSPhilip Yang AMDGPU_GEM_DOMAIN_GTT, false);
290915024dafSPhilip Yang if (ret) {
291015024dafSPhilip Yang pr_debug("Memory eviction: Try again\n");
2911a46a2cd1SFelix Kuehling goto validate_map_fail;
2912a46a2cd1SFelix Kuehling }
291315024dafSPhilip Yang }
29148bb31587SChristian König dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
29158bb31587SChristian König DMA_RESV_USAGE_KERNEL, fence) {
2916*16590745SChristian König ret = amdgpu_sync_fence(&sync_obj, fence, GFP_KERNEL);
29173d97da44SHarish Kasiviswanathan if (ret) {
29183d97da44SHarish Kasiviswanathan pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
29193d97da44SHarish Kasiviswanathan goto validate_map_fail;
29203d97da44SHarish Kasiviswanathan }
29218bb31587SChristian König }
292281bf1451SLang Yu }
292381bf1451SLang Yu
292481bf1451SLang Yu if (failed_size)
292581bf1451SLang Yu pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
292681bf1451SLang Yu
292781bf1451SLang Yu /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO
292881bf1451SLang Yu * validations above would invalidate DMABuf imports again.
292981bf1451SLang Yu */
293081bf1451SLang Yu ret = process_validate_vms(process_info, &exec.ticket);
293181bf1451SLang Yu if (ret) {
293281bf1451SLang Yu pr_debug("Validating VMs failed, ret: %d\n", ret);
293381bf1451SLang Yu goto validate_map_fail;
293481bf1451SLang Yu }
293581bf1451SLang Yu
293681bf1451SLang Yu /* Update mappings managed by KFD. */
293781bf1451SLang Yu list_for_each_entry(mem, &process_info->kfd_bo_list,
293881bf1451SLang Yu validate_list) {
293981bf1451SLang Yu struct kfd_mem_attachment *attachment;
294081bf1451SLang Yu
2941c780b2eeSFelix Kuehling list_for_each_entry(attachment, &mem->attachments, list) {
2942b72ed8a2SFelix Kuehling if (!attachment->is_mapped)
2943b72ed8a2SFelix Kuehling continue;
2944b72ed8a2SFelix Kuehling
2945b72ed8a2SFelix Kuehling kfd_mem_dmaunmap_attachment(mem, attachment);
29464d30a83cSChristian König ret = update_gpuvm_pte(mem, attachment, &sync_obj);
2947a46a2cd1SFelix Kuehling if (ret) {
2948a46a2cd1SFelix Kuehling pr_debug("Memory eviction: update PTE failed. Try again\n");
2949a46a2cd1SFelix Kuehling goto validate_map_fail;
2950a46a2cd1SFelix Kuehling }
2951a46a2cd1SFelix Kuehling }
2952a46a2cd1SFelix Kuehling }
2953a46a2cd1SFelix Kuehling
2954e6ed364eSFelix Kuehling /* Update mappings not managed by KFD */
2955e6ed364eSFelix Kuehling list_for_each_entry(peer_vm, &process_info->vm_list_head,
2956e6ed364eSFelix Kuehling vm_list_node) {
2957e6ed364eSFelix Kuehling struct amdgpu_device *adev = amdgpu_ttm_adev(
2958e6ed364eSFelix Kuehling peer_vm->root.bo->tbo.bdev);
2959e6ed364eSFelix Kuehling
2960e6ed364eSFelix Kuehling ret = amdgpu_vm_handle_moved(adev, peer_vm, &exec.ticket);
2961e6ed364eSFelix Kuehling if (ret) {
2962e6ed364eSFelix Kuehling pr_debug("Memory eviction: handle moved failed. Try again\n");
2963e6ed364eSFelix Kuehling goto validate_map_fail;
2964e6ed364eSFelix Kuehling }
2965e6ed364eSFelix Kuehling }
2966e6ed364eSFelix Kuehling
2967a46a2cd1SFelix Kuehling /* Update page directories */
2968a46a2cd1SFelix Kuehling ret = process_update_pds(process_info, &sync_obj);
2969a46a2cd1SFelix Kuehling if (ret) {
2970a46a2cd1SFelix Kuehling pr_debug("Memory eviction: update PDs failed. Try again\n");
2971a46a2cd1SFelix Kuehling goto validate_map_fail;
2972a46a2cd1SFelix Kuehling }
2973a46a2cd1SFelix Kuehling
2974e6ed364eSFelix Kuehling /* Sync with fences on all the page tables. They implicitly depend on any
2975e6ed364eSFelix Kuehling * move fences from amdgpu_vm_handle_moved above.
2976e6ed364eSFelix Kuehling */
2977e6ed364eSFelix Kuehling ret = process_sync_pds_resv(process_info, &sync_obj);
2978e6ed364eSFelix Kuehling if (ret) {
2979e6ed364eSFelix Kuehling pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2980e6ed364eSFelix Kuehling goto validate_map_fail;
2981e6ed364eSFelix Kuehling }
2982e6ed364eSFelix Kuehling
29833d97da44SHarish Kasiviswanathan /* Wait for validate and PT updates to finish */
2984a46a2cd1SFelix Kuehling amdgpu_sync_wait(&sync_obj, false);
2985a46a2cd1SFelix Kuehling
29869a1c1339SFelix Kuehling /* The old eviction fence may be unsignaled if restore happens
29879a1c1339SFelix Kuehling * after a GPU reset or suspend/resume. Keep the old fence in that
29889a1c1339SFelix Kuehling * case. Otherwise release the old eviction fence and create new
29899a1c1339SFelix Kuehling * one, because fence only goes from unsignaled to signaled once
29909a1c1339SFelix Kuehling * and cannot be reused. Use context and mm from the old fence.
29919a1c1339SFelix Kuehling *
29929a1c1339SFelix Kuehling * If an old eviction fence signals after this check, that's OK.
29939a1c1339SFelix Kuehling * Anyone signaling an eviction fence must stop the queues first
29949a1c1339SFelix Kuehling * and schedule another restore worker.
2995a46a2cd1SFelix Kuehling */
29969a1c1339SFelix Kuehling if (dma_fence_is_signaled(&process_info->eviction_fence->base)) {
29979a1c1339SFelix Kuehling struct amdgpu_amdkfd_fence *new_fence =
29989a1c1339SFelix Kuehling amdgpu_amdkfd_fence_create(
2999a46a2cd1SFelix Kuehling process_info->eviction_fence->base.context,
3000eb2cec55SAlex Sierra process_info->eviction_fence->mm,
3001eb2cec55SAlex Sierra NULL);
30029a1c1339SFelix Kuehling
3003a46a2cd1SFelix Kuehling if (!new_fence) {
3004a46a2cd1SFelix Kuehling pr_err("Failed to create eviction fence\n");
3005a46a2cd1SFelix Kuehling ret = -ENOMEM;
3006a46a2cd1SFelix Kuehling goto validate_map_fail;
3007a46a2cd1SFelix Kuehling }
3008a46a2cd1SFelix Kuehling dma_fence_put(&process_info->eviction_fence->base);
3009a46a2cd1SFelix Kuehling process_info->eviction_fence = new_fence;
30109a1c1339SFelix Kuehling replace_eviction_fence(ef, dma_fence_get(&new_fence->base));
30119a1c1339SFelix Kuehling } else {
30129a1c1339SFelix Kuehling WARN_ONCE(*ef != &process_info->eviction_fence->base,
30139a1c1339SFelix Kuehling "KFD eviction fence doesn't match KGD process_info");
30149a1c1339SFelix Kuehling }
3015a46a2cd1SFelix Kuehling
30164fac4fcfSLang Yu /* Attach new eviction fence to all BOs except pinned ones */
30178abc1eb2SChristian König list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) {
30184fac4fcfSLang Yu if (mem->bo->tbo.pin_count)
30194fac4fcfSLang Yu continue;
30204fac4fcfSLang Yu
302142470840SChristian König dma_resv_add_fence(mem->bo->tbo.base.resv,
302242470840SChristian König &process_info->eviction_fence->base,
302342470840SChristian König DMA_RESV_USAGE_BOOKKEEP);
30244fac4fcfSLang Yu }
302550661eb1SFelix Kuehling /* Attach eviction fence to PD / PT BOs and DMABuf imports */
3026a46a2cd1SFelix Kuehling list_for_each_entry(peer_vm, &process_info->vm_list_head,
3027a46a2cd1SFelix Kuehling vm_list_node) {
3028391629bdSNirmoy Das struct amdgpu_bo *bo = peer_vm->root.bo;
3029a46a2cd1SFelix Kuehling
303042470840SChristian König dma_resv_add_fence(bo->tbo.base.resv,
303142470840SChristian König &process_info->eviction_fence->base,
303242470840SChristian König DMA_RESV_USAGE_BOOKKEEP);
3033a46a2cd1SFelix Kuehling }
3034a46a2cd1SFelix Kuehling
3035a46a2cd1SFelix Kuehling validate_map_fail:
3036a46a2cd1SFelix Kuehling amdgpu_sync_free(&sync_obj);
3037a46a2cd1SFelix Kuehling ttm_reserve_fail:
30388abc1eb2SChristian König drm_exec_fini(&exec);
3039a46a2cd1SFelix Kuehling mutex_unlock(&process_info->lock);
3040a46a2cd1SFelix Kuehling return ret;
3041a46a2cd1SFelix Kuehling }
304271efab6aSOak Zeng
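/*
 * Illustrative caller sketch (hedged; 'p' stands for a hypothetical KFD
 * process structure holding the process_info cookie and the eviction fence
 * pointer): the restore path is expected to invoke this as
 *
 *	r = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
 *						    &p->ef);
 *	if (r)
 *		... reschedule another restore attempt later ...
 */
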
304371efab6aSOak Zeng int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
304471efab6aSOak Zeng {
304571efab6aSOak Zeng struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
304671efab6aSOak Zeng struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
304771efab6aSOak Zeng int ret;
304871efab6aSOak Zeng
304971efab6aSOak Zeng if (!info || !gws)
305071efab6aSOak Zeng return -EINVAL;
305171efab6aSOak Zeng
305271efab6aSOak Zeng *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
305371efab6aSOak Zeng if (!*mem)
3054443e902eSOak Zeng return -ENOMEM;
305571efab6aSOak Zeng
305671efab6aSOak Zeng mutex_init(&(*mem)->lock);
3057c780b2eeSFelix Kuehling INIT_LIST_HEAD(&(*mem)->attachments);
305871efab6aSOak Zeng (*mem)->bo = amdgpu_bo_ref(gws_bo);
305971efab6aSOak Zeng (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
306071efab6aSOak Zeng (*mem)->process_info = process_info;
306171efab6aSOak Zeng add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
306271efab6aSOak Zeng amdgpu_sync_create(&(*mem)->sync);
306371efab6aSOak Zeng
306571efab6aSOak Zeng /* Validate the GWS BO the first time it is added to the process */
306671efab6aSOak Zeng mutex_lock(&(*mem)->process_info->lock);
306771efab6aSOak Zeng ret = amdgpu_bo_reserve(gws_bo, false);
306871efab6aSOak Zeng if (unlikely(ret)) {
306971efab6aSOak Zeng pr_err("Reserve gws bo failed %d\n", ret);
307071efab6aSOak Zeng goto bo_reservation_failure;
307171efab6aSOak Zeng }
307271efab6aSOak Zeng
307371efab6aSOak Zeng ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
307471efab6aSOak Zeng if (ret) {
307571efab6aSOak Zeng pr_err("GWS BO validate failed %d\n", ret);
307671efab6aSOak Zeng goto bo_validation_failure;
307771efab6aSOak Zeng }
307871efab6aSOak Zeng /* The GWS resource is shared between amdgpu and amdkfd.
307971efab6aSOak Zeng * Add the process eviction fence to the BO so that the
308071efab6aSOak Zeng * two sides can evict each other.
308171efab6aSOak Zeng */
3082c8d4c18bSChristian König ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1);
308396cf624bSOak Zeng if (ret)
308496cf624bSOak Zeng goto reserve_shared_fail;
308542470840SChristian König dma_resv_add_fence(gws_bo->tbo.base.resv,
308642470840SChristian König &process_info->eviction_fence->base,
308742470840SChristian König DMA_RESV_USAGE_BOOKKEEP);
308871efab6aSOak Zeng amdgpu_bo_unreserve(gws_bo);
308971efab6aSOak Zeng mutex_unlock(&(*mem)->process_info->lock);
309071efab6aSOak Zeng
309171efab6aSOak Zeng return ret;
309271efab6aSOak Zeng
309396cf624bSOak Zeng reserve_shared_fail:
309471efab6aSOak Zeng bo_validation_failure:
309571efab6aSOak Zeng amdgpu_bo_unreserve(gws_bo);
309671efab6aSOak Zeng bo_reservation_failure:
309771efab6aSOak Zeng mutex_unlock(&(*mem)->process_info->lock);
309871efab6aSOak Zeng amdgpu_sync_free(&(*mem)->sync);
309971efab6aSOak Zeng remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
310071efab6aSOak Zeng amdgpu_bo_unref(&gws_bo);
310171efab6aSOak Zeng mutex_destroy(&(*mem)->lock);
310271efab6aSOak Zeng kfree(*mem);
310371efab6aSOak Zeng *mem = NULL;
310471efab6aSOak Zeng return ret;
310571efab6aSOak Zeng }
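/*
* Usage sketch (illustrative only, not part of this file): the expected
* caller is KFD's queue-management code, which passes in the opaque
* per-process info and the device's GWS BO. The local names below
* (pdd, gws_mem) are hypothetical.
*
*	struct kgd_mem *gws_mem;
*	int r;
*
*	r = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info,
*					     pdd->dev->gws, &gws_mem);
*	if (r)
*		return r;
*	// ... use gws_mem; release later with:
*	// amdgpu_amdkfd_remove_gws_from_process(
*	//		pdd->process->kgd_process_info, gws_mem);
*/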
310671efab6aSOak Zeng
310771efab6aSOak Zeng int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
310871efab6aSOak Zeng {
310971efab6aSOak Zeng int ret;
311071efab6aSOak Zeng struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
311171efab6aSOak Zeng struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
311271efab6aSOak Zeng struct amdgpu_bo *gws_bo = kgd_mem->bo;
311371efab6aSOak Zeng
311471efab6aSOak Zeng /* Remove the BO from the process's validate list so the restore
311571efab6aSOak Zeng * worker won't touch it anymore
311671efab6aSOak Zeng */
311771efab6aSOak Zeng remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
311871efab6aSOak Zeng
311971efab6aSOak Zeng ret = amdgpu_bo_reserve(gws_bo, false);
312071efab6aSOak Zeng if (unlikely(ret)) {
312171efab6aSOak Zeng pr_err("Reserve gws bo failed %d\n", ret);
312271efab6aSOak Zeng /* TODO: add the BO back to the validate_list? */
312371efab6aSOak Zeng return ret;
312471efab6aSOak Zeng }
312571efab6aSOak Zeng amdgpu_amdkfd_remove_eviction_fence(gws_bo,
312671efab6aSOak Zeng process_info->eviction_fence);
312771efab6aSOak Zeng amdgpu_bo_unreserve(gws_bo);
312871efab6aSOak Zeng amdgpu_sync_free(&kgd_mem->sync);
312971efab6aSOak Zeng amdgpu_bo_unref(&gws_bo);
313071efab6aSOak Zeng mutex_destroy(&kgd_mem->lock);
313171efab6aSOak Zeng kfree(mem);
313271efab6aSOak Zeng return 0;
313371efab6aSOak Zeng }
3134fd7d08baSYong Zhao
3135fd7d08baSYong Zhao /* Returns GPU-specific tiling mode information */
3136dff63da9SGraham Sider int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
3137fd7d08baSYong Zhao struct tile_config *config)
3138fd7d08baSYong Zhao {
3139fd7d08baSYong Zhao config->gb_addr_config = adev->gfx.config.gb_addr_config;
3140fd7d08baSYong Zhao config->tile_config_ptr = adev->gfx.config.tile_mode_array;
3141fd7d08baSYong Zhao config->num_tile_configs =
3142fd7d08baSYong Zhao ARRAY_SIZE(adev->gfx.config.tile_mode_array);
3143fd7d08baSYong Zhao config->macro_tile_config_ptr =
3144fd7d08baSYong Zhao adev->gfx.config.macrotile_mode_array;
3145fd7d08baSYong Zhao config->num_macro_tile_configs =
3146fd7d08baSYong Zhao ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
3147fd7d08baSYong Zhao
3148fd7d08baSYong Zhao /* Those values are not set from GFX9 onwards */
3149fd7d08baSYong Zhao config->num_banks = adev->gfx.config.num_banks;
3150fd7d08baSYong Zhao config->num_ranks = adev->gfx.config.num_ranks;
3151fd7d08baSYong Zhao
3152fd7d08baSYong Zhao return 0;
3153fd7d08baSYong Zhao }
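/*
* Consumption sketch (illustrative): a caller copies the reported
* arrays out to user space, e.g. in a get-tile-config style ioctl
* handler. The variable names (user_ptr) are assumptions.
*
*	struct tile_config config;
*
*	amdgpu_amdkfd_get_tile_config(adev, &config);
*	if (copy_to_user(user_ptr, config.tile_config_ptr,
*			 config.num_tile_configs * sizeof(uint32_t)))
*		return -EFAULT;
*/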
31545ccbb057SRajneesh Bhardwaj
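/* Check whether @mem has a mapping in the VM identified by @drm_priv */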
3155f9e292cbSPhilip Yang bool amdgpu_amdkfd_bo_mapped_to_dev(void *drm_priv, struct kgd_mem *mem)
31565ccbb057SRajneesh Bhardwaj {
3157f9e292cbSPhilip Yang struct amdgpu_vm *vm = drm_priv_to_vm(drm_priv);
31585ccbb057SRajneesh Bhardwaj struct kfd_mem_attachment *entry;
31595ccbb057SRajneesh Bhardwaj
31605ccbb057SRajneesh Bhardwaj list_for_each_entry(entry, &mem->attachments, list) {
3161f9e292cbSPhilip Yang if (entry->is_mapped && entry->bo_va->base.vm == vm)
31625ccbb057SRajneesh Bhardwaj return true;
31635ccbb057SRajneesh Bhardwaj }
31645ccbb057SRajneesh Bhardwaj return false;
31655ccbb057SRajneesh Bhardwaj }
31663d2af401SAlex Sierra
31673d2af401SAlex Sierra #if defined(CONFIG_DEBUG_FS)
31683d2af401SAlex Sierra
31693d2af401SAlex Sierra int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data)
31703d2af401SAlex Sierra {
31723d2af401SAlex Sierra spin_lock(&kfd_mem_limit.mem_limit_lock);
31733d2af401SAlex Sierra seq_printf(m, "System mem used %lldM out of %lluM\n",
31743d2af401SAlex Sierra (kfd_mem_limit.system_mem_used >> 20),
31753d2af401SAlex Sierra (kfd_mem_limit.max_system_mem_limit >> 20));
31763d2af401SAlex Sierra seq_printf(m, "TTM mem used %lldM out of %lluM\n",
31773d2af401SAlex Sierra (kfd_mem_limit.ttm_mem_used >> 20),
31783d2af401SAlex Sierra (kfd_mem_limit.max_ttm_mem_limit >> 20));
31793d2af401SAlex Sierra spin_unlock(&kfd_mem_limit.mem_limit_lock);
31803d2af401SAlex Sierra
31813d2af401SAlex Sierra return 0;
31823d2af401SAlex Sierra }
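/*
* Example debugfs output produced by the seq_printf calls above
* (the numbers are illustrative):
*
*	System mem used 512M out of 48214M
*	TTM mem used 256M out of 32143M
*/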
31833d2af401SAlex Sierra
31843d2af401SAlex Sierra #endif