/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: [email protected]
 */
#ifndef AMDGPU_VIRT_H
#define AMDGPU_VIRT_H

#include "amdgv_sriovmsg.h"

#define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS  (1 << 0) /* vBIOS is sr-iov ready */
#define AMDGPU_SRIOV_CAPS_ENABLE_IOV   (1 << 1) /* sr-iov is enabled on this GPU */
#define AMDGPU_SRIOV_CAPS_IS_VF        (1 << 2) /* this GPU is a virtual function */
#define AMDGPU_PASSTHROUGH_MODE        (1 << 3) /* the whole GPU is passed through to the VM */
#define AMDGPU_SRIOV_CAPS_RUNTIME      (1 << 4) /* is out of full access mode */
#define AMDGPU_VF_MMIO_ACCESS_PROTECT  (1 << 5) /* MMIO write access is not allowed in sriov runtime */

/* flags for indirect register access path supported by rlcg for sriov */
#define AMDGPU_RLCG_GC_WRITE_LEGACY    (0x8 << 28)
#define AMDGPU_RLCG_GC_WRITE           (0x0 << 28)
#define AMDGPU_RLCG_GC_READ            (0x1 << 28)
#define AMDGPU_RLCG_MMHUB_WRITE        (0x2 << 28)

/* error code for indirect register access path supported by rlcg for sriov */
#define AMDGPU_RLCG_VFGATE_DISABLED		0x4000000
#define AMDGPU_RLCG_WRONG_OPERATION_TYPE	0x2000000
#define AMDGPU_RLCG_REG_NOT_IN_RANGE		0x1000000

#define AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK	0xFFFFF
#define AMDGPU_RLCG_SCRATCH1_ERROR_MASK		0xF000000

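/*
 * Illustrative sketch (not part of this interface): on the RLCG indirect
 * path the host reports failures through the scratch register, and the
 * error bits above can be masked out of that value.  The helper below is a
 * hypothetical example of mapping those bits to errno codes; the in-driver
 * handling is done in amdgpu_virt_rlcg_reg_rw().
 *
 *	static int example_decode_rlcg_error(u32 scratch1)
 *	{
 *		switch (scratch1 & AMDGPU_RLCG_SCRATCH1_ERROR_MASK) {
 *		case AMDGPU_RLCG_VFGATE_DISABLED:
 *			return -EPERM;   // vfgate is disabled
 *		case AMDGPU_RLCG_WRONG_OPERATION_TYPE:
 *			return -EINVAL;  // wrong operation was requested
 *		case AMDGPU_RLCG_REG_NOT_IN_RANGE:
 *			return -ERANGE;  // register is outside the allowed range
 *		default:
 *			return 0;        // no error bit set
 *		}
 *	}
 */
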
/* all ASICs after AI use this offset */
#define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5
/* tonga/fiji use this offset */
#define mmBIF_IOV_FUNC_IDENTIFIER 0x1503

#define AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT	2

enum amdgpu_sriov_vf_mode {
	SRIOV_VF_MODE_BARE_METAL = 0,
	SRIOV_VF_MODE_ONE_VF,
	SRIOV_VF_MODE_MULTI_VF,
};

struct amdgpu_mm_table {
	struct amdgpu_bo	*bo;
	uint32_t		*cpu_addr;
	uint64_t		gpu_addr;
};

#define AMDGPU_VF_ERROR_ENTRY_SIZE 16

/* struct amdgpu_vf_error_buffer - amdgpu VF error information. */
struct amdgpu_vf_error_buffer {
	struct mutex lock;
	int read_count;
	int write_count;
	uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE];
	uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE];
	uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
};
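
/*
 * The buffer above acts as a small circular log: write_count and read_count
 * only ever grow, and their low bits index into the 16-entry arrays.  A
 * minimal sketch of that indexing (illustrative only; the real producer
 * lives in amdgpu_vf_error.c):
 *
 *	u32 slot = vf_errors->write_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
 *
 *	vf_errors->code[slot]  = error_code;
 *	vf_errors->flags[slot] = error_flags;
 *	vf_errors->data[slot]  = error_data;
 *	vf_errors->write_count++;
 */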

enum idh_request;

/**
 * struct amdgpu_virt_ops - amdgpu device virt operations
 */
struct amdgpu_virt_ops {
	int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
	int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
	int (*req_init_data)(struct amdgpu_device *adev);
	int (*reset_gpu)(struct amdgpu_device *adev);
	void (*ready_to_reset)(struct amdgpu_device *adev);
	int (*wait_reset)(struct amdgpu_device *adev);
	void (*trans_msg)(struct amdgpu_device *adev, enum idh_request req,
			  u32 data1, u32 data2, u32 data3);
	void (*ras_poison_handler)(struct amdgpu_device *adev,
				   enum amdgpu_ras_block block);
	bool (*rcvd_ras_intr)(struct amdgpu_device *adev);
	int (*req_ras_err_count)(struct amdgpu_device *adev);
	int (*req_ras_cper_dump)(struct amdgpu_device *adev, u64 vf_rptr);
};
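
/*
 * Each host/guest mailbox backend supplies an instance of the ops above and
 * the generic virt code dispatches through adev->virt.ops.  A minimal sketch
 * of that dispatch pattern, assuming ops has already been hooked up
 * (example_request_reset() is hypothetical):
 *
 *	static int example_request_reset(struct amdgpu_device *adev)
 *	{
 *		const struct amdgpu_virt_ops *ops = adev->virt.ops;
 *
 *		if (!ops || !ops->reset_gpu)
 *			return -EOPNOTSUPP;
 *
 *		return ops->reset_gpu(adev);
 *	}
 */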

/*
 * Firmware Reserve Frame buffer
 */
struct amdgpu_virt_fw_reserve {
	struct amd_sriov_msg_pf2vf_info_header *p_pf2vf;
	struct amd_sriov_msg_vf2pf_info_header *p_vf2pf;
	void *ras_telemetry;
	unsigned int checksum_key;
};

/*
 * Legacy GIM header
 *
 * Definitions shared between PF and VF.
 * Structures forcibly aligned to 4 to keep the same style as PF.
 */
#define AMDGIM_DATAEXCHANGE_OFFSET		(64 * 1024)

#define AMDGIM_GET_STRUCTURE_RESERVED_SIZE(total, u8, u16, u32, u64) \
	(total - (((u8)+3) / 4 + ((u16)+1) / 2 + (u32) + (u64)*2))
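
/*
 * AMDGIM_GET_STRUCTURE_RESERVED_SIZE() pads a message structure out to a
 * fixed size: "total" is the target size in 32-bit words and u8/u16/u32/u64
 * are the counts of members of each width already present, so the result is
 * the number of uint32_t reserved slots still needed.  A worked example,
 * assuming a structure that must span 256 dwords and already carries
 * 64 u8 members and 14 u32 members:
 *
 *	AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, 14, 0)
 *		= 256 - ((64 + 3) / 4 + (0 + 1) / 2 + 14 + 0 * 2)
 *		= 256 - (16 + 0 + 14)
 *		= 226
 *
 * This is how the reserved[] array of amdgim_vf2pf_info_v2 below is sized.
 */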

enum AMDGIM_FEATURE_FLAG {
	/* GIM supports feature of Error log collecting */
	AMDGIM_FEATURE_ERROR_LOG_COLLECT = 0x1,
	/* GIM supports feature of loading uCodes */
	AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
	/* VRAM LOST by GIM */
	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
	/* MM bandwidth */
	AMDGIM_FEATURE_GIM_MM_BW_MGR = 0x8,
	/* PP ONE VF MODE in GIM */
	AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
	/* Indirect Reg Access enabled */
	AMDGIM_FEATURE_INDIRECT_REG_ACCESS = (1 << 5),
	/* AV1 Support MODE */
	AMDGIM_FEATURE_AV1_SUPPORT = (1 << 6),
	/* VCN RB decouple */
	AMDGIM_FEATURE_VCN_RB_DECOUPLE = (1 << 7),
	/* MES info */
	AMDGIM_FEATURE_MES_INFO_ENABLE = (1 << 8),
	AMDGIM_FEATURE_RAS_CAPS = (1 << 9),
	AMDGIM_FEATURE_RAS_TELEMETRY = (1 << 10),
	AMDGIM_FEATURE_RAS_CPER = (1 << 11),
};

enum AMDGIM_REG_ACCESS_FLAG {
	/* Use PSP to program IH_RB_CNTL */
	AMDGIM_FEATURE_IH_REG_PSP_EN = (1 << 0),
	/* Use RLC to program MMHUB regs */
	AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1),
	/* Use RLC to program GC regs */
	AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2),
};

struct amdgim_pf2vf_info_v1 {
	/* header contains size and version */
	struct amd_sriov_msg_pf2vf_info_header header;
	/* max_width * max_height */
	unsigned int uvd_enc_max_pixels_count;
	/* 16x16 pixels/sec, codec independent */
	unsigned int uvd_enc_max_bandwidth;
	/* max_width * max_height */
	unsigned int vce_enc_max_pixels_count;
	/* 16x16 pixels/sec, codec independent */
	unsigned int vce_enc_max_bandwidth;
	/* MEC FW position in kb from the start of visible frame buffer */
	unsigned int mecfw_kboffset;
	/* the feature flags that the GIM driver supports */
	unsigned int feature_flags;
	/* use private key from mailbox 2 to create checksum */
	unsigned int checksum;
} __aligned(4);

struct amdgim_vf2pf_info_v1 {
	/* header contains size and version */
	struct amd_sriov_msg_vf2pf_info_header header;
	/* driver version */
	char driver_version[64];
	/* driver certification, 1=WHQL, 0=None */
	unsigned int driver_cert;
	/* guest OS type and version: need a define */
	unsigned int os_info;
	/* in the unit of 1M */
	unsigned int fb_usage;
	/* guest gfx engine usage percentage */
	unsigned int gfx_usage;
	/* guest gfx engine health percentage */
	unsigned int gfx_health;
	/* guest compute engine usage percentage */
	unsigned int compute_usage;
	/* guest compute engine health percentage */
	unsigned int compute_health;
	/* guest vce engine usage percentage. 0xffff means N/A. */
	unsigned int vce_enc_usage;
	/* guest vce engine health percentage. 0xffff means N/A. */
	unsigned int vce_enc_health;
	/* guest uvd engine usage percentage. 0xffff means N/A. */
	unsigned int uvd_enc_usage;
	/* guest uvd engine health percentage. 0xffff means N/A. */
	unsigned int uvd_enc_health;
	unsigned int checksum;
} __aligned(4);

struct amdgim_vf2pf_info_v2 {
	/* header contains size and version */
	struct amd_sriov_msg_vf2pf_info_header header;
	uint32_t checksum;
	/* driver version */
	uint8_t driver_version[64];
	/* driver certification, 1=WHQL, 0=None */
	uint32_t driver_cert;
	/* guest OS type and version: need a define */
	uint32_t os_info;
	/* in the unit of 1M */
	uint32_t fb_usage;
	/* guest gfx engine usage percentage */
	uint32_t gfx_usage;
	/* guest gfx engine health percentage */
	uint32_t gfx_health;
	/* guest compute engine usage percentage */
	uint32_t compute_usage;
	/* guest compute engine health percentage */
	uint32_t compute_health;
	/* guest vce engine usage percentage. 0xffff means N/A. */
	uint32_t vce_enc_usage;
	/* guest vce engine health percentage. 0xffff means N/A. */
	uint32_t vce_enc_health;
	/* guest uvd engine usage percentage. 0xffff means N/A. */
	uint32_t uvd_enc_usage;
	/* guest uvd engine health percentage. 0xffff means N/A. */
	uint32_t uvd_enc_health;
	uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amd_sriov_msg_vf2pf_info_header)/sizeof(uint32_t)), 0)];
} __aligned(4);

struct amdgpu_virt_ras_err_handler_data {
	/* point to bad page records array */
	struct eeprom_table_record *bps;
	/* point to reserved bo array */
	struct amdgpu_bo **bps_bo;
	/* the count of entries */
	int count;
	/* last reserved entry's index + 1 */
	int last_reserved;
};

struct amdgpu_virt_ras {
	struct ratelimit_state ras_error_cnt_rs;
	struct ratelimit_state ras_cper_dump_rs;
	struct mutex ras_telemetry_mutex;
	uint64_t cper_rptr;
};

/* GPU virtualization */
struct amdgpu_virt {
	uint32_t caps;
	struct amdgpu_bo *csa_obj;
	void *csa_cpu_addr;
	bool chained_ib_support;
	uint32_t reg_val_offs;
	struct amdgpu_irq_src ack_irq;
	struct amdgpu_irq_src rcv_irq;
	struct work_struct flr_work;
	struct amdgpu_mm_table mm_table;
	const struct amdgpu_virt_ops *ops;
	struct amdgpu_vf_error_buffer vf_errors;
	struct amdgpu_virt_fw_reserve fw_reserve;
	uint32_t gim_feature;
	uint32_t reg_access_mode;
	int req_init_data_ver;
	bool tdr_debug;
	struct amdgpu_virt_ras_err_handler_data *virt_eh_data;
	bool ras_init_done;
	uint32_t reg_access;

	/* vf2pf message */
	struct delayed_work vf2pf_work;
	uint32_t vf2pf_update_interval_ms;
	int vf2pf_update_retry_cnt;

	/* multimedia bandwidth config */
	bool is_mm_bw_enabled;
	uint32_t decode_max_dimension_pixels;
	uint32_t decode_max_frame_pixels;
	uint32_t encode_max_dimension_pixels;
	uint32_t encode_max_frame_pixels;

	/* the ucode id to signal the autoload */
	uint32_t autoload_ucode_id;

	/* Spinlock to protect access to the RLCG register interface */
	spinlock_t rlcg_reg_lock;

	union amd_sriov_ras_caps ras_en_caps;
	union amd_sriov_ras_caps ras_telemetry_en_caps;
	struct amdgpu_virt_ras ras;
	struct amd_sriov_ras_telemetry_error_count count_cache;
};

struct amdgpu_video_codec_info;

#define amdgpu_sriov_enabled(adev) \
	((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)

#define amdgpu_sriov_vf(adev) \
	((adev)->virt.caps & AMDGPU_SRIOV_CAPS_IS_VF)

#define amdgpu_sriov_bios(adev) \
	((adev)->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)

#define amdgpu_sriov_runtime(adev) \
	((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME)

#define amdgpu_sriov_fullaccess(adev) \
	(amdgpu_sriov_vf((adev)) && !amdgpu_sriov_runtime((adev)))
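
/*
 * amdgpu_sriov_fullaccess() is true only while a VF holds exclusive (full)
 * access to the GPU, i.e. outside of runtime mode.  A minimal usage sketch,
 * where example_program_regs_direct()/example_program_regs_indirect() are
 * hypothetical helpers:
 *
 *	if (amdgpu_sriov_fullaccess(adev))
 *		example_program_regs_direct(adev);   // direct MMIO is allowed
 *	else
 *		example_program_regs_indirect(adev); // use the RLCG/PSP indirect path
 */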

#define amdgpu_sriov_reg_indirect_en(adev) \
	(amdgpu_sriov_vf((adev)) && \
	 ((adev)->virt.gim_feature & (AMDGIM_FEATURE_INDIRECT_REG_ACCESS)))

#define amdgpu_sriov_reg_indirect_ih(adev) \
	(amdgpu_sriov_vf((adev)) && \
	 ((adev)->virt.reg_access & (AMDGIM_FEATURE_IH_REG_PSP_EN)))

#define amdgpu_sriov_reg_indirect_mmhub(adev) \
	(amdgpu_sriov_vf((adev)) && \
	 ((adev)->virt.reg_access & (AMDGIM_FEATURE_MMHUB_REG_RLC_EN)))

#define amdgpu_sriov_reg_indirect_gc(adev) \
	(amdgpu_sriov_vf((adev)) && \
	 ((adev)->virt.reg_access & (AMDGIM_FEATURE_GC_REG_RLC_EN)))

#define amdgpu_sriov_rlcg_error_report_enabled(adev) \
	(amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev))
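
/*
 * The amdgpu_sriov_reg_indirect_*() checks above decide whether a given IP
 * block's registers must be programmed through the RLCG indirect interface
 * rather than plain MMIO.  A simplified, illustrative sketch of that branch
 * (the real routing is done inside amdgpu_sriov_wreg(), declared at the end
 * of this file):
 *
 *	if (amdgpu_sriov_reg_indirect_gc(adev))
 *		amdgpu_virt_rlcg_reg_rw(adev, offset, value,
 *					AMDGPU_RLCG_GC_WRITE, xcc_id);
 *	else
 *		WREG32(offset, value);
 */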

#define amdgpu_passthrough(adev) \
	((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)

#define amdgpu_sriov_vf_mmio_access_protection(adev) \
	((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT)

#define amdgpu_sriov_ras_caps_en(adev) \
	((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_CAPS)

#define amdgpu_sriov_ras_telemetry_en(adev) \
	(((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_TELEMETRY) && (adev)->virt.fw_reserve.ras_telemetry)

#define amdgpu_sriov_ras_telemetry_block_en(adev, sriov_blk) \
	(amdgpu_sriov_ras_telemetry_en((adev)) && (adev)->virt.ras_telemetry_en_caps.all & BIT(sriov_blk))

#define amdgpu_sriov_ras_cper_en(adev) \
	((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_CPER)

static inline bool is_virtual_machine(void)
{
#if defined(CONFIG_X86)
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#elif defined(CONFIG_ARM64)
	return !is_kernel_in_hyp_mode();
#else
	return false;
#endif
}
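
/*
 * is_virtual_machine() only answers "are we running under a hypervisor at
 * all"; it says nothing about SR-IOV.  A sketch of how it can be combined
 * with the capability bits above during early detection (illustrative only,
 * not the exact in-driver sequence):
 *
 *	if (!amdgpu_sriov_vf(adev) && is_virtual_machine())
 *		adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
 */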

#define amdgpu_sriov_is_pp_one_vf(adev) \
	((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF)
#define amdgpu_sriov_multi_vf_mode(adev) \
	(amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
#define amdgpu_sriov_is_debug(adev) \
	((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug)
#define amdgpu_sriov_is_normal(adev) \
	((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug))
#define amdgpu_sriov_is_av1_support(adev) \
	((adev)->virt.gim_feature & AMDGIM_FEATURE_AV1_SUPPORT)
#define amdgpu_sriov_is_vcn_rb_decouple(adev) \
	((adev)->virt.gim_feature & AMDGIM_FEATURE_VCN_RB_DECOUPLE)
#define amdgpu_sriov_is_mes_info_enable(adev) \
	((adev)->virt.gim_feature & AMDGIM_FEATURE_MES_INFO_ENABLE)
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
void amdgpu_virt_request_init_data(struct amdgpu_device *adev);
void amdgpu_virt_ready_to_reset(struct amdgpu_device *adev);
int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
bool amdgpu_virt_rcvd_ras_interrupt(struct amdgpu_device *adev);
void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
void amdgpu_virt_exchange_data(struct amdgpu_device *adev);
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
void amdgpu_virt_init(struct amdgpu_device *adev);

bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev);
void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev);

enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev);

void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
			struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
			struct amdgpu_video_codec_info *decode, uint32_t decode_array_size);
void amdgpu_sriov_wreg(struct amdgpu_device *adev,
		       u32 offset, u32 value,
		       u32 acc_flags, u32 hwip, u32 xcc_id);
u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
		      u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id);
bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev,
				    uint32_t ucode_id);
void amdgpu_virt_pre_reset(struct amdgpu_device *adev);
void amdgpu_virt_post_reset(struct amdgpu_device *adev);
bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev);
bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
					  u32 acc_flags, u32 hwip,
					  bool write, u32 *rlcg_flag);
u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id);
bool amdgpu_virt_get_ras_capability(struct amdgpu_device *adev);
int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_block block,
				  struct ras_err_data *err_data);
int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update);
int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev);
bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
					enum amdgpu_ras_block block);
#endif