/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/* amdgpu_amdkfd.h defines the private interface between amdgpu and amdkfd. */

#ifndef AMDGPU_AMDKFD_H_INCLUDED
#define AMDGPU_AMDKFD_H_INCLUDED

#include <linux/list.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/mmu_notifier.h>
#include <linux/memremap.h>
#include <kgd_kfd_interface.h>
#include <drm/drm_client.h>
#include "amdgpu_sync.h"
#include "amdgpu_vm.h"
#include "amdgpu_xcp.h"

extern uint64_t amdgpu_amdkfd_total_mem_size;

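/*
 * Requested TLB flush granularity. Heavyweight flushes are the most
 * thorough and are used e.g. around SVM range eviction; the exact
 * semantics of each type are ASIC-specific.
 */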
enum TLB_FLUSH_TYPE {
	TLB_FLUSH_LEGACY = 0,
	TLB_FLUSH_LIGHTWEIGHT,
	TLB_FLUSH_HEAVYWEIGHT
};

struct amdgpu_device;
struct kfd_process_device;
struct amdgpu_reset_context;

enum kfd_mem_attachment_type {
	KFD_MEM_ATT_SHARED,	/* Share kgd_mem->bo or another attachment's */
	KFD_MEM_ATT_USERPTR,	/* SG bo to DMA map pages from a userptr bo */
	KFD_MEM_ATT_DMABUF,	/* DMAbuf to DMA map TTM BOs */
	KFD_MEM_ATT_SG		/* Tag to DMA map SG BOs */
};

struct kfd_mem_attachment {
	struct list_head list;
	enum kfd_mem_attachment_type type;
	bool is_mapped;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_device *adev;
	uint64_t va;
	uint64_t pte_flags;
};

struct kgd_mem {
	struct mutex lock;
	struct amdgpu_bo *bo;
	struct dma_buf *dmabuf;
	struct hmm_range *range;
	struct list_head attachments;
	/* protected by amdkfd_process_info.lock */
	struct list_head validate_list;
	uint32_t domain;
	unsigned int mapped_to_gpu_memory;
	uint64_t va;

	uint32_t alloc_flags;

	uint32_t invalid;
	struct amdkfd_process_info *process_info;

	struct amdgpu_sync sync;

	uint32_t gem_handle;
	bool aql_queue;
	bool is_imported;
};

/* KFD Memory Eviction */
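/*
 * Orientation note: eviction fences are attached to the reservation
 * objects of all BOs that belong to a KFD process, so that a TTM
 * eviction of any of them triggers an eviction of the whole process;
 * see amdgpu_amdkfd_fence.c for the detailed description.
 */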
struct amdgpu_amdkfd_fence {
	struct dma_fence base;
	struct mm_struct *mm;
	spinlock_t lock;
	char timeline_name[TASK_COMM_LEN];
	struct svm_range_bo *svm_bo;
};

struct amdgpu_kfd_dev {
	struct kfd_dev *dev;
	int64_t vram_used[MAX_XCP];
	uint64_t vram_used_aligned[MAX_XCP];
	bool init_complete;
	struct work_struct reset_work;

	/* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
	struct dev_pagemap pgmap;

	/* Client for KFD BO GEM handle allocations */
	struct drm_client_dev client;
};

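/*
 * Engine selector used by amdgpu_amdkfd_get_fw_version() and
 * amdgpu_amdkfd_submit_ib() below.
 */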
enum kgd_engine_type {
	KGD_ENGINE_PFP = 1,
	KGD_ENGINE_ME,
	KGD_ENGINE_CE,
	KGD_ENGINE_MEC1,
	KGD_ENGINE_MEC2,
	KGD_ENGINE_RLC,
	KGD_ENGINE_SDMA1,
	KGD_ENGINE_SDMA2,
	KGD_ENGINE_MAX
};

struct amdkfd_process_info {
	/* List head of all VMs that belong to a KFD process */
	struct list_head vm_list_head;
	/* List head for all KFD BOs that belong to a KFD process. */
	struct list_head kfd_bo_list;
	/* Lists of valid and invalid userptr BOs */
	struct list_head userptr_valid_list;
	struct list_head userptr_inval_list;
	/* Lock to protect kfd_bo_list */
	struct mutex lock;

	/* Number of VMs */
	unsigned int n_vms;
	/* Eviction Fence */
	struct amdgpu_amdkfd_fence *eviction_fence;

	/* MMU-notifier related fields */
	struct mutex notifier_lock;
	uint32_t evicted_bos;
	struct delayed_work restore_userptr_work;
	struct pid *pid;
	bool block_mmu_notifications;
};

int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev);
int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev);
void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev);
int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
				enum kgd_engine_type engine,
				uint32_t vmid, uint64_t gpu_addr,
				uint32_t *ib_cmd, uint32_t ib_len);
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle);
bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev);

bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev,
			    struct amdgpu_reset_context *reset_context);

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);

void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev);

int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					int queue_bit);

struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
				struct mm_struct *mm,
				struct svm_range_bo *svm_bo);

int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev);
#if defined(CONFIG_DEBUG_FS)
int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
#endif
#if IS_ENABLED(CONFIG_HSA_AMD)
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo);
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
				unsigned long cur_seq, struct kgd_mem *mem);
int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
					uint32_t domain,
					struct dma_fence *fence);
#else
static inline
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

static inline
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

static inline
void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo)
{
}

static inline
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
				unsigned long cur_seq, struct kgd_mem *mem)
{
	return 0;
}

static inline
int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
					uint32_t domain,
					struct dma_fence *fence)
{
	return 0;
}
#endif
/* Shared API */
int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool mqd_gfx9);
void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj);
int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
				void **mem_obj);
void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj);
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem);
int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem);
uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
				      enum kgd_engine_type type);
void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
				      struct kfd_local_mem_info *mem_info,
				      struct amdgpu_xcp *xcp);
uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev);

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev);
int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
				  struct amdgpu_device **dmabuf_adev,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags, int8_t *xcp_id);
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min);
int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
					uint32_t *payload);
int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
				u32 inst);
int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id);
int amdgpu_amdkfd_stop_sched(struct amdgpu_device *adev, uint32_t node_id);
int amdgpu_amdkfd_config_sq_perfmon(struct amdgpu_device *adev, uint32_t xcp_id,
		bool core_override_enable, bool reg_override_enable,
		bool perfmon_override_enable);
bool amdgpu_amdkfd_compute_active(struct amdgpu_device *adev, uint32_t node_id);

/* Read user wptr from a specified user address space with page fault
 * disabled. The memory must be pinned and mapped to the hardware when
 * this is called in hqd_load functions, so it should never fault in
 * the first place. This resolves a circular lock dependency involving
 * four locks, including the DQM lock and mmap_lock.
 */
#define read_user_wptr(mmptr, wptr, dst)				\
	({								\
		bool valid = false;					\
		if ((mmptr) && (wptr)) {				\
			pagefault_disable();				\
			if ((mmptr) == current->mm) {			\
				valid = !get_user((dst), (wptr));	\
			} else if (current->flags & PF_KTHREAD) {	\
				kthread_use_mm(mmptr);			\
				valid = !get_user((dst), (wptr));	\
				kthread_unuse_mm(mmptr);		\
			}						\
			pagefault_enable();				\
		}							\
		valid;							\
	})
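/*
 * Illustrative use (a sketch, not lifted from any hqd_load
 * implementation; the register write is made up for the example):
 *
 *	uint32_t wptr_val;
 *
 *	if (read_user_wptr(mm, wptr, wptr_val))
 *		WREG32(mmCP_HQD_PQ_WPTR, wptr_val);
 *
 * The macro yields false when mmptr or wptr is NULL, when the mm
 * belongs to another task and the caller is not a kthread, or when
 * get_user() faults; in that case dst must be treated as uninitialized.
 */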

/* GPUVM API */
#define drm_priv_to_vm(drm_priv)					\
	(&((struct amdgpu_fpriv *)					\
		((struct drm_file *)(drm_priv))->driver_priv)->vm)
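/*
 * Orientation note: "drm_priv" is the struct drm_file pointer of the
 * DRM render node a KFD process acquired its VM from; the macro digs
 * the amdgpu_vm back out of that file's driver_priv.
 */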

int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
					struct amdgpu_vm *avm,
					void **process_info,
					struct dma_fence **ef);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
					uint8_t xcp_id);
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct amdgpu_device *adev, uint64_t va, uint64_t size,
		void *drm_priv, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags, bool criu_resume);
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
		uint64_t *size);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
					  struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
		struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
					     void **kptr, uint64_t *size);
void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem);

int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo, struct amdgpu_bo **bo_gart);

int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
					    struct dma_fence __rcu **ef);
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
					  struct kfd_vm_fault_info *info);
int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd,
					 uint64_t va, void *drm_priv,
					 struct kgd_mem **mem, uint64_t *size,
					 uint64_t *mmap_offset);
int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
				      struct dma_buf **dmabuf);
void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev);
int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
				struct tile_config *config);
void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
			enum amdgpu_ras_block block, uint32_t reset);

void amdgpu_amdkfd_ras_pasid_poison_consumption_handler(struct amdgpu_device *adev,
			enum amdgpu_ras_block block, uint16_t pasid,
			pasid_notify pasid_fn, void *data, uint32_t reset);

bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev);
bool amdgpu_amdkfd_bo_mapped_to_dev(void *drm_priv, struct kgd_mem *mem);
void amdgpu_amdkfd_block_mmu_notifications(void *p);
int amdgpu_amdkfd_criu_resume(void *p);
int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag, int8_t xcp_id);
void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag, int8_t xcp_id);

u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id);

#define KFD_XCP_MEM_ID(adev, xcp_id) \
	((adev)->xcp_mgr && (xcp_id) >= 0 ?\
		(adev)->xcp_mgr->xcp[(xcp_id)].mem_id : -1)

#define KFD_XCP_MEMORY_SIZE(adev, xcp_id) amdgpu_amdkfd_xcp_memory_size((adev), (xcp_id))
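/*
 * Orientation note: KFD_XCP_MEM_ID() maps a compute partition (XCP) id
 * to the memory partition backing it, yielding -1 when the device has
 * no XCP manager (i.e. is not partitioned) or the id is negative.
 */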

#if IS_ENABLED(CONFIG_HSA_AMD)
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);

/**
 * amdgpu_amdkfd_release_notify() - Notify KFD when GEM object is released
 *
 * Allows KFD to release its resources associated with the GEM object.
 */
void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo);
void amdgpu_amdkfd_reserve_system_mem(uint64_t size);
#else
static inline
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
}

static inline
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
					struct amdgpu_vm *vm)
{
}

static inline
void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
{
}
#endif

#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
int kgd2kfd_init_zone_device(struct amdgpu_device *adev);
#else
static inline
int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
{
	return 0;
}
#endif

/* KGD2KFD callbacks */
int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger);
int kgd2kfd_resume_mm(struct mm_struct *mm);
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
						struct dma_fence *fence);
#if IS_ENABLED(CONFIG_HSA_AMD)
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd,
		      struct amdgpu_reset_context *reset_context);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
int kgd2kfd_check_and_lock_kfd(void);
void kgd2kfd_unlock_kfd(void);
int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id);
bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
			       bool retry_fault);

#else
static inline int kgd2kfd_init(void)
{
	return -ENOENT;
}

static inline void kgd2kfd_exit(void)
{
}

static inline
struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
{
	return NULL;
}

static inline
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	return false;
}

static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}

static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
}

static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
	return 0;
}

static inline int kgd2kfd_pre_reset(struct kfd_dev *kfd,
				    struct amdgpu_reset_context *reset_context)
{
	return 0;
}

static inline int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	return 0;
}

static inline
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
}

static inline
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
}

static inline
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
}

static inline int kgd2kfd_check_and_lock_kfd(void)
{
	return 0;
}

static inline void kgd2kfd_unlock_kfd(void)
{
}

static inline int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
{
	return 0;
}

static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
{
	return 0;
}

static inline bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
{
	return false;
}

static inline bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev,
					     struct amdgpu_iv_entry *entry,
					     bool retry_fault)
{
	return false;
}

#endif
#endif /* AMDGPU_AMDKFD_H_INCLUDED */