/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"
#include "atombios.h"
#include "soc15_hw_ip.h"

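/*
 * Version-specific layouts of the vbios firmwareinfo table; the frev/crev
 * values returned by amdgpu_atom_parse_data_header() select which member
 * is valid.
 */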
union firmware_info {
	struct atom_firmware_info_v3_1 v31;
	struct atom_firmware_info_v3_2 v32;
	struct atom_firmware_info_v3_3 v33;
	struct atom_firmware_info_v3_4 v34;
	struct atom_firmware_info_v3_5 v35;
};

/*
 * Helper function to query firmware capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return the firmware_capability field of the firmwareinfo table on success,
 * or 0 if the table is not available.
 */
uint32_t amdgpu_atomfirmware_query_firmware_capability(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union firmware_info *firmware_info;
	u8 frev, crev;
	u32 fw_cap = 0;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
					  index, &size, &frev, &crev, &data_offset)) {
		/* support firmware_info 3.1 + */
		if ((frev == 3 && crev >= 1) || (frev > 3)) {
			firmware_info = (union firmware_info *)
				(mode_info->atom_context->bios + data_offset);
			fw_cap = le32_to_cpu(firmware_info->v31.firmware_capability);
		}
	}

	return fw_cap;
}

/*
 * Helper function to query gpu virtualization capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if gpu virtualization is supported or false if not
 */
bool amdgpu_atomfirmware_gpu_virtualization_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION) ? true : false;
}

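/*
 * Helper function to initialize the bios scratch register offset
 *
 * @adev: amdgpu_device pointer
 *
 * Reads bios_scratch_reg_startaddr from the firmwareinfo table and caches
 * it in adev->bios_scratch_reg_offset.
 */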
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
{
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						firmwareinfo);
	uint16_t data_offset;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		adev->bios_scratch_reg_offset =
			le32_to_cpu(firmware_info->bios_scratch_reg_startaddr);
	}
}

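/*
 * Helper function to parse the vram_usagebyfirmware v2.1 table
 *
 * @adev: amdgpu_device pointer
 * @fw_usage: pointer to the parsed table
 * @usage_bytes: returns the driver scratch usage in bytes
 *
 * Records the firmware VRAM reservation when the table flags request the
 * SR-IOV message share reservation, otherwise reports the driver usage size.
 */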
static int amdgpu_atomfirmware_allocate_fb_v2_1(struct amdgpu_device *adev,
						struct vram_usagebyfirmware_v2_1 *fw_usage,
						int *usage_bytes)
{
	u32 start_addr, fw_size, drv_size;

	start_addr = le32_to_cpu(fw_usage->start_address_in_kb);
	fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);
	drv_size = le16_to_cpu(fw_usage->used_by_driver_in_kb);

	DRM_DEBUG("atom firmware v2_1 requested %08x %dkb fw %dkb drv\n",
		  start_addr,
		  fw_size,
		  drv_size);

	if ((start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
	    (u32)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
		  ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
		/* Firmware requests VRAM reservation for SR-IOV */
		adev->mman.fw_vram_usage_start_offset = (start_addr &
			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
		adev->mman.fw_vram_usage_size = fw_size << 10;
		/* Use the default scratch size */
		*usage_bytes = 0;
	} else {
		*usage_bytes = drv_size << 10;
	}
	return 0;
}

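/*
 * Helper function to parse the vram_usagebyfirmware v2.2 table
 *
 * @adev: amdgpu_device pointer
 * @fw_usage: pointer to the parsed table
 * @usage_bytes: returns the driver scratch usage in bytes (always 0 here)
 *
 * Under SR-IOV, records the firmware region and driver region0 VRAM
 * reservations requested by the table.
 */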
static int amdgpu_atomfirmware_allocate_fb_v2_2(struct amdgpu_device *adev,
						struct vram_usagebyfirmware_v2_2 *fw_usage,
						int *usage_bytes)
{
	u32 fw_start_addr, fw_size, drv_start_addr, drv_size;

	fw_start_addr = le32_to_cpu(fw_usage->fw_region_start_address_in_kb);
	fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);

	drv_start_addr = le32_to_cpu(fw_usage->driver_region0_start_address_in_kb);
	drv_size = le32_to_cpu(fw_usage->used_by_driver_region0_in_kb);

	DRM_DEBUG("atom requested fw start at %08x %dkb and drv start at %08x %dkb\n",
		  fw_start_addr,
		  fw_size,
		  drv_start_addr,
		  drv_size);

	if (amdgpu_sriov_vf(adev) &&
	    ((fw_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION <<
			       ATOM_VRAM_OPERATION_FLAGS_SHIFT)) == 0)) {
		/* Firmware requests VRAM reservation for SR-IOV */
		adev->mman.fw_vram_usage_start_offset = (fw_start_addr &
			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
		adev->mman.fw_vram_usage_size = fw_size << 10;
	}

	if (amdgpu_sriov_vf(adev) &&
	    ((drv_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION <<
				ATOM_VRAM_OPERATION_FLAGS_SHIFT)) == 0)) {
		/* Driver requests VRAM reservation for SR-IOV */
		adev->mman.drv_vram_usage_start_offset = (drv_start_addr &
			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
		adev->mman.drv_vram_usage_size = drv_size << 10;
	}

	*usage_bytes = 0;
	return 0;
}

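/*
 * Helper function to reserve the VRAM requested by firmware and allocate the
 * atom interpreter scratch buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the vram_usagebyfirmware table (v2.1 or v2.2) and falls back to a
 * 20KB scratch allocation when the table reports no driver usage.
 */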
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						vram_usagebyfirmware);
	struct vram_usagebyfirmware_v2_1 *fw_usage_v2_1;
	struct vram_usagebyfirmware_v2_2 *fw_usage_v2_2;
	u16 data_offset;
	u8 frev, crev;
	int usage_bytes = 0;

	if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
		if (frev == 2 && crev == 1) {
			fw_usage_v2_1 =
				(struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
			amdgpu_atomfirmware_allocate_fb_v2_1(adev,
							     fw_usage_v2_1,
							     &usage_bytes);
		} else if (frev >= 2 && crev >= 2) {
			fw_usage_v2_2 =
				(struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset);
			amdgpu_atomfirmware_allocate_fb_v2_2(adev,
							     fw_usage_v2_2,
							     &usage_bytes);
		}
	}

	ctx->scratch_size_bytes = 0;
	if (usage_bytes == 0)
		usage_bytes = 20 * 1024;
	/* allocate some scratch memory */
	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
	if (!ctx->scratch)
		return -ENOMEM;
	ctx->scratch_size_bytes = usage_bytes;
	return 0;
}

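/*
 * Version-specific layouts of the integratedsysteminfo, umc_info, vram_info
 * and vram_module tables used by the vram queries below.
 */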
union igp_info {
	struct atom_integrated_system_info_v1_11 v11;
	struct atom_integrated_system_info_v1_12 v12;
	struct atom_integrated_system_info_v2_1 v21;
	struct atom_integrated_system_info_v2_3 v23;
};

union umc_info {
	struct atom_umc_info_v3_1 v31;
	struct atom_umc_info_v3_2 v32;
	struct atom_umc_info_v3_3 v33;
	struct atom_umc_info_v4_0 v40;
};

union vram_info {
	struct atom_vram_info_header_v2_3 v23;
	struct atom_vram_info_header_v2_4 v24;
	struct atom_vram_info_header_v2_5 v25;
	struct atom_vram_info_header_v2_6 v26;
	struct atom_vram_info_header_v3_0 v30;
};

union vram_module {
	struct atom_vram_module_v9 v9;
	struct atom_vram_module_v10 v10;
	struct atom_vram_module_v11 v11;
	struct atom_vram_module_v3_0 v30;
};

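/*
 * Helper function to translate an atom memory type id into an
 * AMDGPU_VRAM_TYPE_* value
 *
 * @adev: amdgpu_device pointer
 * @atom_mem_type: memory type id reported by the vbios
 *
 * APUs and dGPUs use different atom memory type namespaces, so the device
 * flags select which mapping applies.
 */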
static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
					      int atom_mem_type)
{
	int vram_type;

	if (adev->flags & AMD_IS_APU) {
		switch (atom_mem_type) {
		case Ddr2MemType:
		case LpDdr2MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR2;
			break;
		case Ddr3MemType:
		case LpDdr3MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR3;
			break;
		case Ddr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR4;
			break;
		case LpDdr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_LPDDR4;
			break;
		case Ddr5MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR5;
			break;
		case LpDdr5MemType:
			vram_type = AMDGPU_VRAM_TYPE_LPDDR5;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	} else {
		switch (atom_mem_type) {
		case ATOM_DGPU_VRAM_TYPE_GDDR5:
			vram_type = AMDGPU_VRAM_TYPE_GDDR5;
			break;
		case ATOM_DGPU_VRAM_TYPE_HBM2:
		case ATOM_DGPU_VRAM_TYPE_HBM2E:
		case ATOM_DGPU_VRAM_TYPE_HBM3:
			vram_type = AMDGPU_VRAM_TYPE_HBM;
			break;
		case ATOM_DGPU_VRAM_TYPE_GDDR6:
			vram_type = AMDGPU_VRAM_TYPE_GDDR6;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	}

	return vram_type;
}

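/*
 * Helper function to query vram width, type and vendor
 *
 * @adev: amdgpu_device pointer
 * @vram_width: if not NULL, returns the total memory bus width
 * @vram_type: if not NULL, returns the AMDGPU_VRAM_TYPE_* value
 * @vram_vendor: if not NULL, returns the memory vendor id (dGPU only)
 *
 * APUs are parsed from the integratedsysteminfo table, GC 12.0.x dGPUs from
 * the umc_info table and other dGPUs from the vram_info table.
 */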
int
amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
				  int *vram_width, int *vram_type,
				  int *vram_vendor)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index, i = 0;
	u16 data_offset, size;
	union igp_info *igp_info;
	union vram_info *vram_info;
	union umc_info *umc_info;
	union vram_module *vram_module;
	u8 frev, crev;
	u8 mem_type;
	u8 mem_vendor;
	u32 mem_channel_number;
	u32 mem_channel_width;
	u32 module_id;

	if (adev->flags & AMD_IS_APU)
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    integratedsysteminfo);
	else {
		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
		case IP_VERSION(12, 0, 0):
		case IP_VERSION(12, 0, 1):
			index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, umc_info);
			break;
		default:
			index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, vram_info);
		}
	}
	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size,
					  &frev, &crev, &data_offset)) {
		if (adev->flags & AMD_IS_APU) {
			igp_info = (union igp_info *)
				(mode_info->atom_context->bios + data_offset);
			switch (frev) {
			case 1:
				switch (crev) {
				case 11:
				case 12:
					mem_channel_number = igp_info->v11.umachannelnumber;
					if (!mem_channel_number)
						mem_channel_number = 1;
					mem_type = igp_info->v11.memorytype;
					if (mem_type == LpDdr5MemType)
						mem_channel_width = 32;
					else
						mem_channel_width = 64;
					if (vram_width)
						*vram_width = mem_channel_number * mem_channel_width;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					break;
				default:
					return -EINVAL;
				}
				break;
			case 2:
				switch (crev) {
				case 1:
				case 2:
					mem_channel_number = igp_info->v21.umachannelnumber;
					if (!mem_channel_number)
						mem_channel_number = 1;
					mem_type = igp_info->v21.memorytype;
					if (mem_type == LpDdr5MemType)
						mem_channel_width = 32;
					else
						mem_channel_width = 64;
					if (vram_width)
						*vram_width = mem_channel_number * mem_channel_width;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					break;
				case 3:
					mem_channel_number = igp_info->v23.umachannelnumber;
					if (!mem_channel_number)
						mem_channel_number = 1;
					mem_type = igp_info->v23.memorytype;
					if (mem_type == LpDdr5MemType)
						mem_channel_width = 32;
					else
						mem_channel_width = 64;
					if (vram_width)
						*vram_width = mem_channel_number * mem_channel_width;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					break;
				default:
					return -EINVAL;
				}
				break;
			default:
				return -EINVAL;
			}
		} else {
			switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
			case IP_VERSION(12, 0, 0):
			case IP_VERSION(12, 0, 1):
				umc_info = (union umc_info *)(mode_info->atom_context->bios + data_offset);

				if (frev == 4) {
					switch (crev) {
					case 0:
						mem_channel_number = le32_to_cpu(umc_info->v40.channel_num);
						mem_type = le32_to_cpu(umc_info->v40.vram_type);
						mem_channel_width = le32_to_cpu(umc_info->v40.channel_width);
						mem_vendor = RREG32(adev->bios_scratch_reg_offset + 4) & 0xF;
						if (vram_vendor)
							*vram_vendor = mem_vendor;
						if (vram_type)
							*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
						if (vram_width)
							*vram_width = mem_channel_number * (1 << mem_channel_width);
						break;
					default:
						return -EINVAL;
					}
				} else
					return -EINVAL;
				break;
			default:
				vram_info = (union vram_info *)
					(mode_info->atom_context->bios + data_offset);

				module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
				if (frev == 3) {
					switch (crev) {
					/* v30 */
					case 0:
						vram_module = (union vram_module *)vram_info->v30.vram_module;
						mem_vendor = (vram_module->v30.dram_vendor_id) & 0xF;
						if (vram_vendor)
							*vram_vendor = mem_vendor;
						mem_type = vram_info->v30.memory_type;
						if (vram_type)
							*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
						mem_channel_number = vram_info->v30.channel_num;
						mem_channel_width = vram_info->v30.channel_width;
						if (vram_width)
							*vram_width = mem_channel_number * 16;
						break;
					default:
						return -EINVAL;
					}
				} else if (frev == 2) {
					switch (crev) {
					/* v23 */
					case 3:
						if (module_id > vram_info->v23.vram_module_num)
							module_id = 0;
						vram_module = (union vram_module *)vram_info->v23.vram_module;
						while (i < module_id) {
							vram_module = (union vram_module *)
								((u8 *)vram_module + vram_module->v9.vram_module_size);
							i++;
						}
						mem_type = vram_module->v9.memory_type;
						if (vram_type)
							*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
						mem_channel_number = vram_module->v9.channel_num;
						mem_channel_width = vram_module->v9.channel_width;
						if (vram_width)
							*vram_width = mem_channel_number * (1 << mem_channel_width);
						mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
						if (vram_vendor)
							*vram_vendor = mem_vendor;
						break;
					/* v24 */
					case 4:
						if (module_id > vram_info->v24.vram_module_num)
							module_id = 0;
						vram_module = (union vram_module *)vram_info->v24.vram_module;
						while (i < module_id) {
							vram_module = (union vram_module *)
								((u8 *)vram_module + vram_module->v10.vram_module_size);
							i++;
						}
						mem_type = vram_module->v10.memory_type;
						if (vram_type)
							*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
						mem_channel_number = vram_module->v10.channel_num;
						mem_channel_width = vram_module->v10.channel_width;
						if (vram_width)
							*vram_width = mem_channel_number * (1 << mem_channel_width);
						mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
						if (vram_vendor)
							*vram_vendor = mem_vendor;
						break;
					/* v25 */
					case 5:
						if (module_id > vram_info->v25.vram_module_num)
							module_id = 0;
						vram_module = (union vram_module *)vram_info->v25.vram_module;
						while (i < module_id) {
							vram_module = (union vram_module *)
								((u8 *)vram_module + vram_module->v11.vram_module_size);
							i++;
						}
						mem_type = vram_module->v11.memory_type;
						if (vram_type)
							*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
						mem_channel_number = vram_module->v11.channel_num;
						mem_channel_width = vram_module->v11.channel_width;
						if (vram_width)
							*vram_width = mem_channel_number * (1 << mem_channel_width);
						mem_vendor = (vram_module->v11.vender_rev_id) & 0xF;
						if (vram_vendor)
							*vram_vendor = mem_vendor;
						break;
					/* v26 */
					case 6:
						if (module_id > vram_info->v26.vram_module_num)
							module_id = 0;
						vram_module = (union vram_module *)vram_info->v26.vram_module;
						while (i < module_id) {
							vram_module = (union vram_module *)
								((u8 *)vram_module + vram_module->v9.vram_module_size);
							i++;
						}
						mem_type = vram_module->v9.memory_type;
						if (vram_type)
							*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
						mem_channel_number = vram_module->v9.channel_num;
						mem_channel_width = vram_module->v9.channel_width;
						if (vram_width)
							*vram_width = mem_channel_number * (1 << mem_channel_width);
						mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
						if (vram_vendor)
							*vram_vendor = mem_vendor;
						break;
					default:
						return -EINVAL;
					}
				} else {
					/* invalid frev */
					return -EINVAL;
				}
			}
		}
	}

	return 0;
}

/*
 * Return true if the vbios enabled ecc by default and the umc info table is
 * available, or false if ecc is not enabled or the umc info table is not
 * available.
 */
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union umc_info *umc_info;
	u8 frev, crev;
	bool mem_ecc_enabled = false;
	u8 umc_config;
	u32 umc_config1;

	adev->ras_default_ecc_enabled = false;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size, &frev, &crev, &data_offset)) {
		umc_info = (union umc_info *)(mode_info->atom_context->bios + data_offset);
		if (frev == 3) {
			switch (crev) {
			case 1:
				umc_config = le32_to_cpu(umc_info->v31.umc_config);
				mem_ecc_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			case 2:
				umc_config = le32_to_cpu(umc_info->v32.umc_config);
				mem_ecc_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			case 3:
				umc_config = le32_to_cpu(umc_info->v33.umc_config);
				umc_config1 = le32_to_cpu(umc_info->v33.umc_config1);
				mem_ecc_enabled =
					((umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ||
					 (umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE)) ? true : false;
				adev->ras_default_ecc_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			default:
				/* unsupported crev */
				return false;
			}
		} else if (frev == 4) {
			switch (crev) {
			case 0:
				umc_config = le32_to_cpu(umc_info->v40.umc_config);
				umc_config1 = le32_to_cpu(umc_info->v40.umc_config1);
				mem_ecc_enabled =
					(umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE) ? true : false;
				adev->ras_default_ecc_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			default:
				/* unsupported crev */
				return false;
			}
		} else {
			/* unsupported frev */
			return false;
		}
	}

	return mem_ecc_enabled;
}

/*
 * Helper function to query sram ecc capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if vbios supports sram ecc or false if not
 */
bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
}

/*
 * Helper function to query dynamic boot config capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if vbios supports dynamic boot config or false if not
 */
bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
}

/**
 * amdgpu_atomfirmware_ras_rom_addr -- Get the RAS EEPROM addr from VBIOS
 * @adev: amdgpu_device pointer
 * @i2c_address: pointer to u8; if not NULL, will contain
 *    the RAS EEPROM address if the function returns true
 *
 * Return true if VBIOS supports RAS EEPROM address reporting,
 * else return false. If true and @i2c_address is not NULL,
 * will contain the RAS ROM address.
 */
bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev,
				      u8 *i2c_address)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union firmware_info *firmware_info;
	u8 frev, crev;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
					  index, &size, &frev, &crev,
					  &data_offset)) {
		/* support firmware_info 3.4 + */
		if ((frev == 3 && crev >= 4) || (frev > 3)) {
			firmware_info = (union firmware_info *)
				(mode_info->atom_context->bios + data_offset);
			/* The ras_rom_i2c_slave_addr should ideally
			 * be a 19-bit EEPROM address, which would be
			 * used as is by the driver; see top of
			 * amdgpu_eeprom.c.
			 *
			 * When this is the case, 0 is of course a
			 * valid RAS EEPROM address, in which case,
			 * we'll drop the first "if (firm...)" and only
			 * leave the check for the pointer.
			 *
			 * The reason this works right now is because
			 * ras_rom_i2c_slave_addr contains the EEPROM
			 * device type qualifier 1010b in the top 4
			 * bits.
			 */
			if (firmware_info->v34.ras_rom_i2c_slave_addr) {
				if (i2c_address)
					*i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
				return true;
			}
		}
	}

	return false;
}

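/*
 * Version-specific layouts of the smu_info and gfx_info tables used by the
 * clock and gfx configuration queries below.
 */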
union smu_info {
	struct atom_smu_info_v3_1 v31;
	struct atom_smu_info_v4_0 v40;
};

union gfx_info {
	struct atom_gfx_info_v2_2 v22;
	struct atom_gfx_info_v2_4 v24;
	struct atom_gfx_info_v2_7 v27;
	struct atom_gfx_info_v3_0 v30;
};

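/*
 * Helper function to query the bootup clocks and pll parameters
 *
 * @adev: amdgpu_device pointer
 *
 * Populates the default sclk/mclk from the firmwareinfo table and the
 * spll/mpll reference clocks from the smu_info, umc_info and gfx_info
 * tables. Return 0 if at least one table was parsed, -EINVAL otherwise.
 */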
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct amdgpu_pll *spll = &adev->clock.spll;
	struct amdgpu_pll *mpll = &adev->clock.mpll;
	uint8_t frev, crev;
	uint16_t data_offset;
	int ret = -EINVAL, index;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(mode_info->atom_context->bios +
						data_offset);

		adev->clock.default_sclk =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		adev->clock.default_mclk =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);

		adev->pm.current_sclk = adev->clock.default_sclk;
		adev->pm.current_mclk = adev->clock.default_mclk;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smu_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union smu_info *smu_info =
			(union smu_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* system clock */
		if (frev == 3)
			spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz);
		else if (frev == 4)
			spll->reference_freq = le32_to_cpu(smu_info->v40.core_refclk_10khz);

		spll->reference_div = 0;
		spll->min_post_div = 1;
		spll->max_post_div = 1;
		spll->min_ref_div = 2;
		spll->max_ref_div = 0xff;
		spll->min_feedback_div = 4;
		spll->max_feedback_div = 0xff;
		spll->best_vco = 0;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union umc_info *umc_info =
			(union umc_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* memory clock */
		mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz);

		mpll->reference_div = 0;
		mpll->min_post_div = 1;
		mpll->max_post_div = 1;
		mpll->min_ref_div = 2;
		mpll->max_ref_div = 0xff;
		mpll->min_feedback_div = 4;
		mpll->max_feedback_div = 0xff;
		mpll->best_vco = 0;

		ret = 0;
	}

	/*
	 * If the ASIC is Navi or newer, the rlc reference clock from the
	 * vbios gfx_info table is used as the system clock.
	 */
	if (adev->asic_type >= CHIP_NAVI10) {
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    gfx_info);
		if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
						  &frev, &crev, &data_offset)) {
			union gfx_info *gfx_info = (union gfx_info *)
				(mode_info->atom_context->bios + data_offset);
			if ((frev == 3) ||
			    (frev == 2 && crev == 6)) {
				spll->reference_freq = le32_to_cpu(gfx_info->v30.golden_tsc_count_lower_refclk);
				ret = 0;
			} else if ((frev == 2) &&
				   (crev >= 2) &&
				   (crev != 6)) {
				spll->reference_freq = le32_to_cpu(gfx_info->v22.rlc_gpu_timer_refclk);
				ret = 0;
			} else {
				BUG();
			}
		}
	}

	return ret;
}

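/*
 * Helper function to query the gfx configuration from the gfx_info table
 *
 * @adev: amdgpu_device pointer
 *
 * Fills adev->gfx.config and adev->gfx.cu_info from gfx_info v2.4, v2.7 or
 * v3.0. Return 0 on success or -EINVAL on an unsupported table revision.
 */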
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	uint8_t frev, crev;
	uint16_t data_offset;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    gfx_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union gfx_info *gfx_info = (union gfx_info *)
			(mode_info->atom_context->bios + data_offset);
		if (frev == 2) {
			switch (crev) {
			case 4:
				adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
				adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
				adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
				adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
				adev->gfx.config.gs_prim_buffer_depth =
					le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
				adev->gfx.config.double_offchip_lds_buf =
					gfx_info->v24.gc_double_offchip_lds_buffer;
				adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
				adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
				adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
				adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
				return 0;
			case 7:
				adev->gfx.config.max_shader_engines = gfx_info->v27.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v27.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v27.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v27.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v27.max_texture_channel_caches;
				adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v27.gc_num_gprs);
				adev->gfx.config.max_gs_threads = gfx_info->v27.gc_num_max_gs_thds;
				adev->gfx.config.gs_vgt_table_depth = gfx_info->v27.gc_gs_table_depth;
				adev->gfx.config.gs_prim_buffer_depth = le16_to_cpu(gfx_info->v27.gc_gsprim_buff_depth);
				adev->gfx.config.double_offchip_lds_buf = gfx_info->v27.gc_double_offchip_lds_buffer;
				adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v27.gc_wave_size);
				adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v27.gc_max_waves_per_simd);
				adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v27.gc_max_scratch_slots_per_cu;
				adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v27.gc_lds_size);
				return 0;
			default:
				return -EINVAL;
			}
		} else if (frev == 3) {
			switch (crev) {
			case 0:
				adev->gfx.config.max_shader_engines = gfx_info->v30.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v30.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v30.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v30.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v30.max_texture_channel_caches;
				return 0;
			default:
				return -EINVAL;
			}
		} else {
			return -EINVAL;
		}
	}
	return -EINVAL;
}

/*
 * Helper function to query two stage mem training capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if two stage mem training is supported or false if not
 */
bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING) ? true : false;
}

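/*
 * Helper function to query the firmware reserved framebuffer size
 *
 * @adev: amdgpu_device pointer
 *
 * Return the reserved size in bytes from the firmwareinfo table, 0 if the
 * table cannot be parsed or the revision reserves nothing, or -EINVAL on an
 * unsupported major revision.
 */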
int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	union firmware_info *firmware_info;
	int index;
	u16 data_offset, size;
	u8 frev, crev;
	int fw_reserved_fb_size;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	if (!amdgpu_atom_parse_data_header(ctx, index, &size,
					   &frev, &crev, &data_offset))
		/* failed to parse data_header */
		return 0;

	firmware_info = (union firmware_info *)(ctx->bios + data_offset);

	if (frev != 3)
		return -EINVAL;

	switch (crev) {
	case 4:
		fw_reserved_fb_size =
			(firmware_info->v34.fw_reserved_size_in_kb << 10);
		break;
	case 5:
		fw_reserved_fb_size =
			(firmware_info->v35.fw_reserved_size_in_kb << 10);
		break;
	default:
		fw_reserved_fb_size = 0;
		break;
	}

	return fw_reserved_fb_size;
}

/*
 * Helper function to execute the asic_init command table
 *
 * @adev: amdgpu_device pointer
 * @fb_reset: flag to indicate whether fb is reset or not
 *
 * Return 0 on success, a negative error code otherwise
 */
int amdgpu_atomfirmware_asic_init(struct amdgpu_device *adev, bool fb_reset)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct atom_context *ctx;
	uint8_t frev, crev;
	uint16_t data_offset;
	uint32_t bootup_sclk_in10khz, bootup_mclk_in10khz;
	struct asic_init_ps_allocation_v2_1 asic_init_ps_v2_1;
	int index;

	if (!mode_info)
		return -EINVAL;

	ctx = mode_info->atom_context;
	if (!ctx)
		return -EINVAL;

	/* query bootup sclk/mclk from firmware_info table */
	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(ctx, index, NULL,
					  &frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(ctx->bios +
						data_offset);

		bootup_sclk_in10khz =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		bootup_mclk_in10khz =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);
	} else {
		return -EINVAL;
	}

	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    asic_init);
	if (amdgpu_atom_parse_cmd_header(mode_info->atom_context, index, &frev, &crev)) {
		if (frev == 2 && crev >= 1) {
			memset(&asic_init_ps_v2_1, 0, sizeof(asic_init_ps_v2_1));
			asic_init_ps_v2_1.param.engineparam.sclkfreqin10khz = bootup_sclk_in10khz;
			asic_init_ps_v2_1.param.memparam.mclkfreqin10khz = bootup_mclk_in10khz;
			asic_init_ps_v2_1.param.engineparam.engineflag = b3NORMAL_ENGINE_INIT;
			if (!fb_reset)
				asic_init_ps_v2_1.param.memparam.memflag = b3DRAM_SELF_REFRESH_EXIT;
			else
				asic_init_ps_v2_1.param.memparam.memflag = 0;
		} else {
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}

	return amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, (uint32_t *)&asic_init_ps_v2_1,
					 sizeof(asic_init_ps_v2_1));
}