1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/firmware.h>
25 
26 #include "amdgpu.h"
27 #include "amdgpu_discovery.h"
28 #include "soc15_hw_ip.h"
29 #include "discovery.h"
30 #include "amdgpu_ras.h"
31 
32 #include "soc15.h"
33 #include "gfx_v9_0.h"
34 #include "gfx_v9_4_3.h"
35 #include "gmc_v9_0.h"
36 #include "df_v1_7.h"
37 #include "df_v3_6.h"
38 #include "df_v4_3.h"
39 #include "df_v4_6_2.h"
40 #include "nbio_v6_1.h"
41 #include "nbio_v7_0.h"
42 #include "nbio_v7_4.h"
43 #include "nbio_v7_9.h"
44 #include "nbio_v7_11.h"
45 #include "hdp_v4_0.h"
46 #include "vega10_ih.h"
47 #include "vega20_ih.h"
48 #include "sdma_v4_0.h"
49 #include "sdma_v4_4_2.h"
50 #include "uvd_v7_0.h"
51 #include "vce_v4_0.h"
52 #include "vcn_v1_0.h"
53 #include "vcn_v2_5.h"
54 #include "jpeg_v2_5.h"
55 #include "smuio_v9_0.h"
56 #include "gmc_v10_0.h"
57 #include "gmc_v11_0.h"
58 #include "gmc_v12_0.h"
59 #include "gfxhub_v2_0.h"
60 #include "mmhub_v2_0.h"
61 #include "nbio_v2_3.h"
62 #include "nbio_v4_3.h"
63 #include "nbio_v7_2.h"
64 #include "nbio_v7_7.h"
65 #include "nbif_v6_3_1.h"
66 #include "hdp_v5_0.h"
67 #include "hdp_v5_2.h"
68 #include "hdp_v6_0.h"
69 #include "hdp_v7_0.h"
70 #include "nv.h"
71 #include "soc21.h"
72 #include "soc24.h"
73 #include "navi10_ih.h"
74 #include "ih_v6_0.h"
75 #include "ih_v6_1.h"
76 #include "ih_v7_0.h"
77 #include "gfx_v10_0.h"
78 #include "gfx_v11_0.h"
79 #include "gfx_v12_0.h"
80 #include "sdma_v5_0.h"
81 #include "sdma_v5_2.h"
82 #include "sdma_v6_0.h"
83 #include "sdma_v7_0.h"
84 #include "lsdma_v6_0.h"
85 #include "lsdma_v7_0.h"
86 #include "vcn_v2_0.h"
87 #include "jpeg_v2_0.h"
88 #include "vcn_v3_0.h"
89 #include "jpeg_v3_0.h"
90 #include "vcn_v4_0.h"
91 #include "jpeg_v4_0.h"
92 #include "vcn_v4_0_3.h"
93 #include "jpeg_v4_0_3.h"
94 #include "vcn_v4_0_5.h"
95 #include "jpeg_v4_0_5.h"
96 #include "amdgpu_vkms.h"
97 #include "mes_v11_0.h"
98 #include "mes_v12_0.h"
99 #include "smuio_v11_0.h"
100 #include "smuio_v11_0_6.h"
101 #include "smuio_v13_0.h"
102 #include "smuio_v13_0_3.h"
103 #include "smuio_v13_0_6.h"
104 #include "smuio_v14_0_2.h"
105 #include "vcn_v5_0_0.h"
106 #include "jpeg_v5_0_0.h"
107 
108 #include "amdgpu_vpe.h"
109 #if defined(CONFIG_DRM_AMD_ISP)
110 #include "amdgpu_isp.h"
111 #endif
112 
113 #define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
114 MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
115 
116 #define mmIP_DISCOVERY_VERSION  0x16A00
117 #define mmRCC_CONFIG_MEMSIZE	0xde3
118 #define mmMP0_SMN_C2PMSG_33	0x16061
119 #define mmMM_INDEX		0x0
120 #define mmMM_INDEX_HI		0x6
121 #define mmMM_DATA		0x1
122 
123 static const char *hw_id_names[HW_ID_MAX] = {
124 	[MP1_HWID]		= "MP1",
125 	[MP2_HWID]		= "MP2",
126 	[THM_HWID]		= "THM",
127 	[SMUIO_HWID]		= "SMUIO",
128 	[FUSE_HWID]		= "FUSE",
129 	[CLKA_HWID]		= "CLKA",
130 	[PWR_HWID]		= "PWR",
131 	[GC_HWID]		= "GC",
132 	[UVD_HWID]		= "UVD",
133 	[AUDIO_AZ_HWID]		= "AUDIO_AZ",
134 	[ACP_HWID]		= "ACP",
135 	[DCI_HWID]		= "DCI",
136 	[DMU_HWID]		= "DMU",
137 	[DCO_HWID]		= "DCO",
138 	[DIO_HWID]		= "DIO",
139 	[XDMA_HWID]		= "XDMA",
140 	[DCEAZ_HWID]		= "DCEAZ",
141 	[DAZ_HWID]		= "DAZ",
142 	[SDPMUX_HWID]		= "SDPMUX",
143 	[NTB_HWID]		= "NTB",
144 	[IOHC_HWID]		= "IOHC",
145 	[L2IMU_HWID]		= "L2IMU",
146 	[VCE_HWID]		= "VCE",
147 	[MMHUB_HWID]		= "MMHUB",
148 	[ATHUB_HWID]		= "ATHUB",
149 	[DBGU_NBIO_HWID]	= "DBGU_NBIO",
150 	[DFX_HWID]		= "DFX",
151 	[DBGU0_HWID]		= "DBGU0",
152 	[DBGU1_HWID]		= "DBGU1",
153 	[OSSSYS_HWID]		= "OSSSYS",
154 	[HDP_HWID]		= "HDP",
155 	[SDMA0_HWID]		= "SDMA0",
156 	[SDMA1_HWID]		= "SDMA1",
157 	[SDMA2_HWID]		= "SDMA2",
158 	[SDMA3_HWID]		= "SDMA3",
159 	[LSDMA_HWID]		= "LSDMA",
160 	[ISP_HWID]		= "ISP",
161 	[DBGU_IO_HWID]		= "DBGU_IO",
162 	[DF_HWID]		= "DF",
163 	[CLKB_HWID]		= "CLKB",
164 	[FCH_HWID]		= "FCH",
165 	[DFX_DAP_HWID]		= "DFX_DAP",
166 	[L1IMU_PCIE_HWID]	= "L1IMU_PCIE",
167 	[L1IMU_NBIF_HWID]	= "L1IMU_NBIF",
168 	[L1IMU_IOAGR_HWID]	= "L1IMU_IOAGR",
169 	[L1IMU3_HWID]		= "L1IMU3",
170 	[L1IMU4_HWID]		= "L1IMU4",
171 	[L1IMU5_HWID]		= "L1IMU5",
172 	[L1IMU6_HWID]		= "L1IMU6",
173 	[L1IMU7_HWID]		= "L1IMU7",
174 	[L1IMU8_HWID]		= "L1IMU8",
175 	[L1IMU9_HWID]		= "L1IMU9",
176 	[L1IMU10_HWID]		= "L1IMU10",
177 	[L1IMU11_HWID]		= "L1IMU11",
178 	[L1IMU12_HWID]		= "L1IMU12",
179 	[L1IMU13_HWID]		= "L1IMU13",
180 	[L1IMU14_HWID]		= "L1IMU14",
181 	[L1IMU15_HWID]		= "L1IMU15",
182 	[WAFLC_HWID]		= "WAFLC",
183 	[FCH_USB_PD_HWID]	= "FCH_USB_PD",
184 	[PCIE_HWID]		= "PCIE",
185 	[PCS_HWID]		= "PCS",
186 	[DDCL_HWID]		= "DDCL",
187 	[SST_HWID]		= "SST",
188 	[IOAGR_HWID]		= "IOAGR",
189 	[NBIF_HWID]		= "NBIF",
190 	[IOAPIC_HWID]		= "IOAPIC",
191 	[SYSTEMHUB_HWID]	= "SYSTEMHUB",
192 	[NTBCCP_HWID]		= "NTBCCP",
193 	[UMC_HWID]		= "UMC",
194 	[SATA_HWID]		= "SATA",
195 	[USB_HWID]		= "USB",
196 	[CCXSEC_HWID]		= "CCXSEC",
197 	[XGMI_HWID]		= "XGMI",
198 	[XGBE_HWID]		= "XGBE",
199 	[MP0_HWID]		= "MP0",
200 	[VPE_HWID]		= "VPE",
201 };
202 
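/* Map the driver's HWIP software indices to the HW IDs used in the
 * IP discovery table.
 */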
203 static int hw_id_map[MAX_HWIP] = {
204 	[GC_HWIP]	= GC_HWID,
205 	[HDP_HWIP]	= HDP_HWID,
206 	[SDMA0_HWIP]	= SDMA0_HWID,
207 	[SDMA1_HWIP]	= SDMA1_HWID,
208 	[SDMA2_HWIP]    = SDMA2_HWID,
209 	[SDMA3_HWIP]    = SDMA3_HWID,
210 	[LSDMA_HWIP]    = LSDMA_HWID,
211 	[MMHUB_HWIP]	= MMHUB_HWID,
212 	[ATHUB_HWIP]	= ATHUB_HWID,
213 	[NBIO_HWIP]	= NBIF_HWID,
214 	[MP0_HWIP]	= MP0_HWID,
215 	[MP1_HWIP]	= MP1_HWID,
216 	[UVD_HWIP]	= UVD_HWID,
217 	[VCE_HWIP]	= VCE_HWID,
218 	[DF_HWIP]	= DF_HWID,
219 	[DCE_HWIP]	= DMU_HWID,
220 	[OSSSYS_HWIP]	= OSSSYS_HWID,
221 	[SMUIO_HWIP]	= SMUIO_HWID,
222 	[PWR_HWIP]	= PWR_HWID,
223 	[NBIF_HWIP]	= NBIF_HWID,
224 	[THM_HWIP]	= THM_HWID,
225 	[CLK_HWIP]	= CLKA_HWID,
226 	[UMC_HWIP]	= UMC_HWID,
227 	[XGMI_HWIP]	= XGMI_HWID,
228 	[DCI_HWIP]	= DCI_HWID,
229 	[PCIE_HWIP]	= PCIE_HWID,
230 	[VPE_HWIP]	= VPE_HWID,
231 	[ISP_HWIP]	= ISP_HWID,
232 };
233 
234 static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
235 {
236 	u64 tmr_offset, tmr_size, pos;
237 	void *discv_regn;
238 	int ret;
239 
240 	ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
241 	if (ret)
242 		return ret;
243 
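	/* The IP discovery binary sits DISCOVERY_TMR_OFFSET bytes from the end of the TMR */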
244 	pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
245 
246 	/* This region is read-only and reserved from system use */
247 	discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
248 	if (discv_regn) {
249 		memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
250 		memunmap(discv_regn);
251 		return 0;
252 	}
253 
254 	return -ENOENT;
255 }
256 
257 #define IP_DISCOVERY_V2		2
258 #define IP_DISCOVERY_V4		4
259 
260 static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
261 						 uint8_t *binary)
262 {
263 	uint64_t vram_size;
264 	u32 msg;
265 	int i, ret = 0;
266 
267 	if (!amdgpu_sriov_vf(adev)) {
268 		/* It can take up to a second for IFWI init to complete on some dGPUs,
269 		 * but generally it should be in the 60-100ms range.  Normally this starts
270 		 * as soon as the device gets power so by the time the OS loads this has long
271 		 * completed.  However, when a card is hotplugged via e.g., USB4, we need to
272 		 * wait for this to complete.  Once the C2PMSG is updated, we can
273 		 * continue.
274 		 */
275 
276 		for (i = 0; i < 1000; i++) {
277 			msg = RREG32(mmMP0_SMN_C2PMSG_33);
278 			if (msg & 0x80000000)
279 				break;
280 			usleep_range(1000, 1100);
281 		}
282 	}
283 
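	/* RCC_CONFIG_MEMSIZE reports the VRAM size in MB, hence the shift to bytes */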
284 	vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
285 
286 	if (vram_size) {
287 		uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
288 		amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
289 					  adev->mman.discovery_tmr_size, false);
290 	} else {
291 		ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
292 	}
293 
294 	return ret;
295 }
296 
297 static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
298 {
299 	const struct firmware *fw;
300 	const char *fw_name;
301 	int r;
302 
303 	switch (amdgpu_discovery) {
304 	case 2:
305 		fw_name = FIRMWARE_IP_DISCOVERY;
306 		break;
307 	default:
308 		dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
309 		return -EINVAL;
310 	}
311 
312 	r = request_firmware(&fw, fw_name, adev->dev);
313 	if (r) {
314 		dev_err(adev->dev, "can't load firmware \"%s\"\n",
315 			fw_name);
316 		return r;
317 	}
318 
319 	memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
320 	release_firmware(fw);
321 
322 	return 0;
323 }
324 
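/* Simple byte-wise additive checksum used to validate the discovery tables */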
325 static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
326 {
327 	uint16_t checksum = 0;
328 	int i;
329 
330 	for (i = 0; i < size; i++)
331 		checksum += data[i];
332 
333 	return checksum;
334 }
335 
336 static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
337 						    uint16_t expected)
338 {
	return amdgpu_discovery_calculate_checksum(data, size) == expected;
340 }
341 
342 static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
343 {
344 	struct binary_header *bhdr;
345 	bhdr = (struct binary_header *)binary;
346 
347 	return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
348 }
349 
350 static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
351 {
352 	/*
353 	 * So far, apply this quirk only on those Navy Flounder boards which
354 	 * have a bad harvest table of VCN config.
355 	 */
356 	if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
357 	    (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
358 		switch (adev->pdev->revision) {
359 		case 0xC1:
360 		case 0xC2:
361 		case 0xC3:
362 		case 0xC5:
363 		case 0xC7:
364 		case 0xCF:
365 		case 0xDF:
366 			adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
367 			adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
368 			break;
369 		default:
370 			break;
371 		}
372 	}
373 }
374 
375 static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
376 					   struct binary_header *bhdr)
377 {
	struct nps_info_header *nhdr;
	struct table_info *info;
	uint16_t checksum;
	uint16_t offset;

	info = &bhdr->table_list[NPS_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	nhdr = (struct nps_info_header *)(adev->mman.discovery_bin + offset);
388 
389 	if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) {
390 		dev_dbg(adev->dev, "invalid ip discovery nps info table id\n");
391 		return -EINVAL;
392 	}
393 
394 	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
395 					      le32_to_cpu(nhdr->size_bytes),
396 					      checksum)) {
397 		dev_dbg(adev->dev, "invalid nps info data table checksum\n");
398 		return -EINVAL;
399 	}
400 
401 	return 0;
402 }
403 
404 static int amdgpu_discovery_init(struct amdgpu_device *adev)
405 {
406 	struct table_info *info;
407 	struct binary_header *bhdr;
408 	uint16_t offset;
409 	uint16_t size;
410 	uint16_t checksum;
411 	int r;
412 
413 	adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
414 	adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
415 	if (!adev->mman.discovery_bin)
416 		return -ENOMEM;
417 
418 	/* Read from file if it is the preferred option */
419 	if (amdgpu_discovery == 2) {
420 		dev_info(adev->dev, "use ip discovery information from file");
421 		r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
422 
423 		if (r) {
424 			dev_err(adev->dev, "failed to read ip discovery binary from file\n");
425 			r = -EINVAL;
426 			goto out;
427 		}
428 
429 	} else {
430 		r = amdgpu_discovery_read_binary_from_mem(
431 			adev, adev->mman.discovery_bin);
432 		if (r)
433 			goto out;
434 	}
435 
436 	/* check the ip discovery binary signature */
437 	if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
438 		dev_err(adev->dev,
439 			"get invalid ip discovery binary signature\n");
440 		r = -EINVAL;
441 		goto out;
442 	}
443 
444 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
445 
446 	offset = offsetof(struct binary_header, binary_checksum) +
447 		sizeof(bhdr->binary_checksum);
448 	size = le16_to_cpu(bhdr->binary_size) - offset;
449 	checksum = le16_to_cpu(bhdr->binary_checksum);
450 
451 	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
452 					      size, checksum)) {
453 		dev_err(adev->dev, "invalid ip discovery binary checksum\n");
454 		r = -EINVAL;
455 		goto out;
456 	}
457 
458 	info = &bhdr->table_list[IP_DISCOVERY];
459 	offset = le16_to_cpu(info->offset);
460 	checksum = le16_to_cpu(info->checksum);
461 
462 	if (offset) {
463 		struct ip_discovery_header *ihdr =
464 			(struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
465 		if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
466 			dev_err(adev->dev, "invalid ip discovery data table signature\n");
467 			r = -EINVAL;
468 			goto out;
469 		}
470 
471 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
472 						      le16_to_cpu(ihdr->size), checksum)) {
473 			dev_err(adev->dev, "invalid ip discovery data table checksum\n");
474 			r = -EINVAL;
475 			goto out;
476 		}
477 	}
478 
479 	info = &bhdr->table_list[GC];
480 	offset = le16_to_cpu(info->offset);
481 	checksum = le16_to_cpu(info->checksum);
482 
483 	if (offset) {
484 		struct gpu_info_header *ghdr =
485 			(struct gpu_info_header *)(adev->mman.discovery_bin + offset);
486 
487 		if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
488 			dev_err(adev->dev, "invalid ip discovery gc table id\n");
489 			r = -EINVAL;
490 			goto out;
491 		}
492 
493 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
494 						      le32_to_cpu(ghdr->size), checksum)) {
495 			dev_err(adev->dev, "invalid gc data table checksum\n");
496 			r = -EINVAL;
497 			goto out;
498 		}
499 	}
500 
501 	info = &bhdr->table_list[HARVEST_INFO];
502 	offset = le16_to_cpu(info->offset);
503 	checksum = le16_to_cpu(info->checksum);
504 
505 	if (offset) {
506 		struct harvest_info_header *hhdr =
507 			(struct harvest_info_header *)(adev->mman.discovery_bin + offset);
508 
509 		if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
510 			dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
511 			r = -EINVAL;
512 			goto out;
513 		}
514 
515 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
516 						      sizeof(struct harvest_table), checksum)) {
517 			dev_err(adev->dev, "invalid harvest data table checksum\n");
518 			r = -EINVAL;
519 			goto out;
520 		}
521 	}
522 
523 	info = &bhdr->table_list[VCN_INFO];
524 	offset = le16_to_cpu(info->offset);
525 	checksum = le16_to_cpu(info->checksum);
526 
527 	if (offset) {
528 		struct vcn_info_header *vhdr =
529 			(struct vcn_info_header *)(adev->mman.discovery_bin + offset);
530 
531 		if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
532 			dev_err(adev->dev, "invalid ip discovery vcn table id\n");
533 			r = -EINVAL;
534 			goto out;
535 		}
536 
537 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
538 						      le32_to_cpu(vhdr->size_bytes), checksum)) {
539 			dev_err(adev->dev, "invalid vcn data table checksum\n");
540 			r = -EINVAL;
541 			goto out;
542 		}
543 	}
544 
545 	info = &bhdr->table_list[MALL_INFO];
546 	offset = le16_to_cpu(info->offset);
547 	checksum = le16_to_cpu(info->checksum);
548 
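	/* Note: the "0 &&" below keeps the MALL info table verification disabled */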
549 	if (0 && offset) {
550 		struct mall_info_header *mhdr =
551 			(struct mall_info_header *)(adev->mman.discovery_bin + offset);
552 
553 		if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
554 			dev_err(adev->dev, "invalid ip discovery mall table id\n");
555 			r = -EINVAL;
556 			goto out;
557 		}
558 
559 		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
560 						      le32_to_cpu(mhdr->size_bytes), checksum)) {
561 			dev_err(adev->dev, "invalid mall data table checksum\n");
562 			r = -EINVAL;
563 			goto out;
564 		}
565 	}
566 
567 	return 0;
568 
569 out:
570 	kfree(adev->mman.discovery_bin);
571 	adev->mman.discovery_bin = NULL;
572 	if ((amdgpu_discovery != 2) &&
573 	    (RREG32(mmIP_DISCOVERY_VERSION) == 4))
574 		amdgpu_ras_query_boot_status(adev, 4);
575 	return r;
576 }
577 
578 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
579 
580 void amdgpu_discovery_fini(struct amdgpu_device *adev)
581 {
582 	amdgpu_discovery_sysfs_fini(adev);
583 	kfree(adev->mman.discovery_bin);
584 	adev->mman.discovery_bin = NULL;
585 }
586 
587 static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
588 {
589 	if (ip->instance_number >= HWIP_MAX_INSTANCE) {
590 		DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
591 			  ip->instance_number);
592 		return -EINVAL;
593 	}
594 	if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
595 		DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
596 			  le16_to_cpu(ip->hw_id));
597 		return -EINVAL;
598 	}
599 
600 	return 0;
601 }
602 
603 static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
604 						uint32_t *vcn_harvest_count)
605 {
606 	struct binary_header *bhdr;
607 	struct ip_discovery_header *ihdr;
608 	struct die_header *dhdr;
609 	struct ip_v4 *ip;
610 	uint16_t die_offset, ip_offset, num_dies, num_ips;
611 	int i, j;
612 
613 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
614 	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
615 			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
616 	num_dies = le16_to_cpu(ihdr->num_dies);
617 
618 	/* scan harvest bit of all IP data structures */
619 	for (i = 0; i < num_dies; i++) {
620 		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
621 		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
622 		num_ips = le16_to_cpu(dhdr->num_ips);
623 		ip_offset = die_offset + sizeof(*dhdr);
624 
625 		for (j = 0; j < num_ips; j++) {
626 			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
627 
628 			if (amdgpu_discovery_validate_ip(ip))
629 				goto next_ip;
630 
631 			if (le16_to_cpu(ip->variant) == 1) {
632 				switch (le16_to_cpu(ip->hw_id)) {
633 				case VCN_HWID:
634 					(*vcn_harvest_count)++;
635 					if (ip->instance_number == 0) {
636 						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
637 						adev->vcn.inst_mask &=
638 							~AMDGPU_VCN_HARVEST_VCN0;
639 						adev->jpeg.inst_mask &=
640 							~AMDGPU_VCN_HARVEST_VCN0;
641 					} else {
642 						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
643 						adev->vcn.inst_mask &=
644 							~AMDGPU_VCN_HARVEST_VCN1;
645 						adev->jpeg.inst_mask &=
646 							~AMDGPU_VCN_HARVEST_VCN1;
647 					}
648 					break;
649 				case DMU_HWID:
650 					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
651 					break;
652 				default:
653 					break;
654 				}
655 			}
656 next_ip:
657 			if (ihdr->base_addr_64_bit)
658 				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
659 			else
660 				ip_offset += struct_size(ip, base_address, ip->num_base_address);
661 		}
662 	}
663 }
664 
665 static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
666 						     uint32_t *vcn_harvest_count,
667 						     uint32_t *umc_harvest_count)
668 {
669 	struct binary_header *bhdr;
670 	struct harvest_table *harvest_info;
671 	u16 offset;
672 	int i;
673 	uint32_t umc_harvest_config = 0;
674 
675 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
676 	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
677 
678 	if (!offset) {
679 		dev_err(adev->dev, "invalid harvest table offset\n");
680 		return;
681 	}
682 
683 	harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
684 
685 	for (i = 0; i < 32; i++) {
686 		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
687 			break;
688 
689 		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
690 		case VCN_HWID:
691 			(*vcn_harvest_count)++;
692 			adev->vcn.harvest_config |=
693 				(1 << harvest_info->list[i].number_instance);
694 			adev->jpeg.harvest_config |=
695 				(1 << harvest_info->list[i].number_instance);
696 
697 			adev->vcn.inst_mask &=
698 				~(1U << harvest_info->list[i].number_instance);
699 			adev->jpeg.inst_mask &=
700 				~(1U << harvest_info->list[i].number_instance);
701 			break;
702 		case DMU_HWID:
703 			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
704 			break;
705 		case UMC_HWID:
706 			umc_harvest_config |=
707 				1 << (le16_to_cpu(harvest_info->list[i].number_instance));
708 			(*umc_harvest_count)++;
709 			break;
710 		case GC_HWID:
711 			adev->gfx.xcc_mask &=
712 				~(1U << harvest_info->list[i].number_instance);
713 			break;
714 		case SDMA0_HWID:
715 			adev->sdma.sdma_mask &=
716 				~(1U << harvest_info->list[i].number_instance);
717 			break;
718 #if defined(CONFIG_DRM_AMD_ISP)
719 		case ISP_HWID:
			adev->isp.harvest_config |=
				(1U << harvest_info->list[i].number_instance);
722 			break;
723 #endif
724 		default:
725 			break;
726 		}
727 	}
728 
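	/* Active UMC mask: all node instances minus the harvested ones */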
729 	adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
730 				~umc_harvest_config;
731 }
732 
733 /* ================================================== */
734 
735 struct ip_hw_instance {
736 	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */
737 
738 	int hw_id;
739 	u8  num_instance;
740 	u8  major, minor, revision;
741 	u8  harvest;
742 
743 	int num_base_addresses;
744 	u32 base_addr[] __counted_by(num_base_addresses);
745 };
746 
747 struct ip_hw_id {
748 	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
749 	int hw_id;
750 };
751 
752 struct ip_die_entry {
753 	struct kset ip_kset;     /* ip_discovery/die/#die/, contains ip_hw_id  */
754 	u16 num_ips;
755 };
756 
757 /* -------------------------------------------------- */
758 
759 struct ip_hw_instance_attr {
760 	struct attribute attr;
761 	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
762 };
763 
764 static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
765 {
766 	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
767 }
768 
769 static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
770 {
771 	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
772 }
773 
774 static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
775 {
776 	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
777 }
778 
779 static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
780 {
781 	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
782 }
783 
784 static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
785 {
786 	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
787 }
788 
789 static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
790 {
791 	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
792 }
793 
794 static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
795 {
796 	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
797 }
798 
799 static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
800 {
801 	ssize_t res, at;
802 	int ii;
803 
804 	for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
		/* Each entry needs 12 bytes: "0x", eight hex digits, a newline
		 * and the terminating NUL, so stop before exceeding PAGE_SIZE.
		 */
807 		if (at + 12 > PAGE_SIZE)
808 			break;
809 		res = sysfs_emit_at(buf, at, "0x%08X\n",
810 				    ip_hw_instance->base_addr[ii]);
811 		if (res <= 0)
812 			break;
813 		at += res;
814 	}
815 
816 	return res < 0 ? res : at;
817 }
818 
819 static struct ip_hw_instance_attr ip_hw_attr[] = {
820 	__ATTR_RO(hw_id),
821 	__ATTR_RO(num_instance),
822 	__ATTR_RO(major),
823 	__ATTR_RO(minor),
824 	__ATTR_RO(revision),
825 	__ATTR_RO(harvest),
826 	__ATTR_RO(num_base_addresses),
827 	__ATTR_RO(base_addr),
828 };
829 
830 static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
831 ATTRIBUTE_GROUPS(ip_hw_instance);
832 
833 #define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
834 #define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)
835 
836 static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
837 					struct attribute *attr,
838 					char *buf)
839 {
840 	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
841 	struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);
842 
843 	if (!ip_hw_attr->show)
844 		return -EIO;
845 
846 	return ip_hw_attr->show(ip_hw_instance, buf);
847 }
848 
849 static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
850 	.show = ip_hw_instance_attr_show,
851 };
852 
853 static void ip_hw_instance_release(struct kobject *kobj)
854 {
855 	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
856 
857 	kfree(ip_hw_instance);
858 }
859 
860 static const struct kobj_type ip_hw_instance_ktype = {
861 	.release = ip_hw_instance_release,
862 	.sysfs_ops = &ip_hw_instance_sysfs_ops,
863 	.default_groups = ip_hw_instance_groups,
864 };
865 
866 /* -------------------------------------------------- */
867 
868 #define to_ip_hw_id(x)  container_of(to_kset(x), struct ip_hw_id, hw_id_kset)
869 
870 static void ip_hw_id_release(struct kobject *kobj)
871 {
872 	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);
873 
874 	if (!list_empty(&ip_hw_id->hw_id_kset.list))
875 		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
876 	kfree(ip_hw_id);
877 }
878 
879 static const struct kobj_type ip_hw_id_ktype = {
880 	.release = ip_hw_id_release,
881 	.sysfs_ops = &kobj_sysfs_ops,
882 };
883 
884 /* -------------------------------------------------- */
885 
886 static void die_kobj_release(struct kobject *kobj);
887 static void ip_disc_release(struct kobject *kobj);
888 
889 struct ip_die_entry_attribute {
890 	struct attribute attr;
891 	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
892 };
893 
894 #define to_ip_die_entry_attr(x)  container_of(x, struct ip_die_entry_attribute, attr)
895 
896 static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
897 {
898 	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
899 }
900 
901 /* If there are more ip_die_entry attrs, other than the number of IPs,
 * we can make this into an array of attrs, and then initialize
903  * ip_die_entry_attrs in a loop.
904  */
905 static struct ip_die_entry_attribute num_ips_attr =
906 	__ATTR_RO(num_ips);
907 
908 static struct attribute *ip_die_entry_attrs[] = {
909 	&num_ips_attr.attr,
910 	NULL,
911 };
912 ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */
913 
914 #define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)
915 
916 static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
917 				      struct attribute *attr,
918 				      char *buf)
919 {
920 	struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
921 	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
922 
923 	if (!ip_die_entry_attr->show)
924 		return -EIO;
925 
926 	return ip_die_entry_attr->show(ip_die_entry, buf);
927 }
928 
929 static void ip_die_entry_release(struct kobject *kobj)
930 {
931 	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
932 
933 	if (!list_empty(&ip_die_entry->ip_kset.list))
934 		DRM_ERROR("ip_die_entry->ip_kset is not empty");
935 	kfree(ip_die_entry);
936 }
937 
938 static const struct sysfs_ops ip_die_entry_sysfs_ops = {
939 	.show = ip_die_entry_attr_show,
940 };
941 
942 static const struct kobj_type ip_die_entry_ktype = {
943 	.release = ip_die_entry_release,
944 	.sysfs_ops = &ip_die_entry_sysfs_ops,
945 	.default_groups = ip_die_entry_groups,
946 };
947 
948 static const struct kobj_type die_kobj_ktype = {
949 	.release = die_kobj_release,
950 	.sysfs_ops = &kobj_sysfs_ops,
951 };
952 
953 static const struct kobj_type ip_discovery_ktype = {
954 	.release = ip_disc_release,
955 	.sysfs_ops = &kobj_sysfs_ops,
956 };
957 
958 struct ip_discovery_top {
959 	struct kobject kobj;    /* ip_discovery/ */
960 	struct kset die_kset;   /* ip_discovery/die/, contains ip_die_entry */
961 	struct amdgpu_device *adev;
962 };
963 
964 static void die_kobj_release(struct kobject *kobj)
965 {
966 	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
967 						       struct ip_discovery_top,
968 						       die_kset);
969 	if (!list_empty(&ip_top->die_kset.list))
970 		DRM_ERROR("ip_top->die_kset is not empty");
971 }
972 
973 static void ip_disc_release(struct kobject *kobj)
974 {
975 	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
976 						       kobj);
977 	struct amdgpu_device *adev = ip_top->adev;
978 
979 	adev->ip_top = NULL;
980 	kfree(ip_top);
981 }
982 
983 static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
984 						 uint16_t hw_id, uint8_t inst)
985 {
986 	uint8_t harvest = 0;
987 
	/* Until a uniform way is figured out, derive the harvest mask based on hw_id */
989 	switch (hw_id) {
990 	case VCN_HWID:
991 		harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
992 		break;
993 	case DMU_HWID:
994 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
995 			harvest = 0x1;
996 		break;
997 	case UMC_HWID:
		/* TODO: UMC harvesting needs separate parsing; ignore it for now. */
999 		break;
1000 	case GC_HWID:
1001 		harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
1002 		break;
1003 	case SDMA0_HWID:
1004 		harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
1005 		break;
1006 	default:
1007 		break;
1008 	}
1009 
1010 	return harvest;
1011 }
1012 
1013 static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
1014 				      struct ip_die_entry *ip_die_entry,
1015 				      const size_t _ip_offset, const int num_ips,
1016 				      bool reg_base_64)
1017 {
1018 	int ii, jj, kk, res;
1019 
1020 	DRM_DEBUG("num_ips:%d", num_ips);
1021 
1022 	/* Find all IPs of a given HW ID, and add their instance to
1023 	 * #die/#hw_id/#instance/<attributes>
1024 	 */
1025 	for (ii = 0; ii < HW_ID_MAX; ii++) {
1026 		struct ip_hw_id *ip_hw_id = NULL;
1027 		size_t ip_offset = _ip_offset;
1028 
1029 		for (jj = 0; jj < num_ips; jj++) {
1030 			struct ip_v4 *ip;
1031 			struct ip_hw_instance *ip_hw_instance;
1032 
1033 			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
1034 			if (amdgpu_discovery_validate_ip(ip) ||
1035 			    le16_to_cpu(ip->hw_id) != ii)
1036 				goto next_ip;
1037 
1038 			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
1039 
1040 			/* We have a hw_id match; register the hw
1041 			 * block if not yet registered.
1042 			 */
1043 			if (!ip_hw_id) {
1044 				ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
1045 				if (!ip_hw_id)
1046 					return -ENOMEM;
1047 				ip_hw_id->hw_id = ii;
1048 
1049 				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
1050 				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
1051 				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
1052 				res = kset_register(&ip_hw_id->hw_id_kset);
1053 				if (res) {
1054 					DRM_ERROR("Couldn't register ip_hw_id kset");
1055 					kfree(ip_hw_id);
1056 					return res;
1057 				}
1058 				if (hw_id_names[ii]) {
1059 					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
1060 								&ip_hw_id->hw_id_kset.kobj,
1061 								hw_id_names[ii]);
1062 					if (res) {
1063 						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
1064 							  hw_id_names[ii],
1065 							  kobject_name(&ip_die_entry->ip_kset.kobj));
1066 					}
1067 				}
1068 			}
1069 
1070 			/* Now register its instance.
1071 			 */
1072 			ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
1073 							     base_addr,
1074 							     ip->num_base_address),
1075 						 GFP_KERNEL);
1076 			if (!ip_hw_instance) {
1077 				DRM_ERROR("no memory for ip_hw_instance");
1078 				return -ENOMEM;
1079 			}
1080 			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
1081 			ip_hw_instance->num_instance = ip->instance_number;
1082 			ip_hw_instance->major = ip->major;
1083 			ip_hw_instance->minor = ip->minor;
1084 			ip_hw_instance->revision = ip->revision;
1085 			ip_hw_instance->harvest =
1086 				amdgpu_discovery_get_harvest_info(
1087 					adev, ip_hw_instance->hw_id,
1088 					ip_hw_instance->num_instance);
1089 			ip_hw_instance->num_base_addresses = ip->num_base_address;
1090 
1091 			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
1092 				if (reg_base_64)
1093 					ip_hw_instance->base_addr[kk] =
1094 						lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
1095 				else
1096 					ip_hw_instance->base_addr[kk] = ip->base_address[kk];
1097 			}
1098 
1099 			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
1100 			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
1101 			res = kobject_add(&ip_hw_instance->kobj, NULL,
1102 					  "%d", ip_hw_instance->num_instance);
1103 next_ip:
1104 			if (reg_base_64)
1105 				ip_offset += struct_size(ip, base_address_64,
1106 							 ip->num_base_address);
1107 			else
1108 				ip_offset += struct_size(ip, base_address,
1109 							 ip->num_base_address);
1110 		}
1111 	}
1112 
1113 	return 0;
1114 }
1115 
1116 static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
1117 {
1118 	struct binary_header *bhdr;
1119 	struct ip_discovery_header *ihdr;
1120 	struct die_header *dhdr;
1121 	struct kset *die_kset = &adev->ip_top->die_kset;
1122 	u16 num_dies, die_offset, num_ips;
1123 	size_t ip_offset;
1124 	int ii, res;
1125 
1126 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1127 	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1128 					      le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1129 	num_dies = le16_to_cpu(ihdr->num_dies);
1130 
1131 	DRM_DEBUG("number of dies: %d\n", num_dies);
1132 
1133 	for (ii = 0; ii < num_dies; ii++) {
1134 		struct ip_die_entry *ip_die_entry;
1135 
1136 		die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
1137 		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1138 		num_ips = le16_to_cpu(dhdr->num_ips);
1139 		ip_offset = die_offset + sizeof(*dhdr);
1140 
1141 		/* Add the die to the kset.
1142 		 *
1143 		 * dhdr->die_id == ii, which was checked in
1144 		 * amdgpu_discovery_reg_base_init().
1145 		 */
1146 
1147 		ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
1148 		if (!ip_die_entry)
1149 			return -ENOMEM;
1150 
1151 		ip_die_entry->num_ips = num_ips;
1152 
1153 		kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
1154 		ip_die_entry->ip_kset.kobj.kset = die_kset;
1155 		ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
1156 		res = kset_register(&ip_die_entry->ip_kset);
1157 		if (res) {
1158 			DRM_ERROR("Couldn't register ip_die_entry kset");
1159 			kfree(ip_die_entry);
1160 			return res;
1161 		}
1162 
1163 		amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
1164 	}
1165 
1166 	return 0;
1167 }
1168 
1169 static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
1170 {
1171 	struct kset *die_kset;
1172 	int res, ii;
1173 
1174 	if (!adev->mman.discovery_bin)
1175 		return -EINVAL;
1176 
1177 	adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
1178 	if (!adev->ip_top)
1179 		return -ENOMEM;
1180 
1181 	adev->ip_top->adev = adev;
1182 
1183 	res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
1184 				   &adev->dev->kobj, "ip_discovery");
1185 	if (res) {
1186 		DRM_ERROR("Couldn't init and add ip_discovery/");
1187 		goto Err;
1188 	}
1189 
1190 	die_kset = &adev->ip_top->die_kset;
1191 	kobject_set_name(&die_kset->kobj, "%s", "die");
1192 	die_kset->kobj.parent = &adev->ip_top->kobj;
1193 	die_kset->kobj.ktype = &die_kobj_ktype;
1194 	res = kset_register(&adev->ip_top->die_kset);
1195 	if (res) {
1196 		DRM_ERROR("Couldn't register die_kset");
1197 		goto Err;
1198 	}
1199 
1200 	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
1201 		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
1202 	ip_hw_instance_attrs[ii] = NULL;
1203 
1204 	res = amdgpu_discovery_sysfs_recurse(adev);
1205 
1206 	return res;
1207 Err:
1208 	kobject_put(&adev->ip_top->kobj);
1209 	return res;
1210 }
1211 
1212 /* -------------------------------------------------- */
1213 
1214 #define list_to_kobj(el) container_of(el, struct kobject, entry)
1215 
1216 static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
1217 {
1218 	struct list_head *el, *tmp;
1219 	struct kset *hw_id_kset;
1220 
1221 	hw_id_kset = &ip_hw_id->hw_id_kset;
1222 	spin_lock(&hw_id_kset->list_lock);
1223 	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
1224 		list_del_init(el);
1225 		spin_unlock(&hw_id_kset->list_lock);
1226 		/* kobject is embedded in ip_hw_instance */
1227 		kobject_put(list_to_kobj(el));
1228 		spin_lock(&hw_id_kset->list_lock);
1229 	}
1230 	spin_unlock(&hw_id_kset->list_lock);
1231 	kobject_put(&ip_hw_id->hw_id_kset.kobj);
1232 }
1233 
1234 static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
1235 {
1236 	struct list_head *el, *tmp;
1237 	struct kset *ip_kset;
1238 
1239 	ip_kset = &ip_die_entry->ip_kset;
1240 	spin_lock(&ip_kset->list_lock);
1241 	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
1242 		list_del_init(el);
1243 		spin_unlock(&ip_kset->list_lock);
1244 		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
1245 		spin_lock(&ip_kset->list_lock);
1246 	}
1247 	spin_unlock(&ip_kset->list_lock);
1248 	kobject_put(&ip_die_entry->ip_kset.kobj);
1249 }
1250 
1251 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
1252 {
1253 	struct list_head *el, *tmp;
1254 	struct kset *die_kset;
1255 
1256 	die_kset = &adev->ip_top->die_kset;
1257 	spin_lock(&die_kset->list_lock);
1258 	list_for_each_prev_safe(el, tmp, &die_kset->list) {
1259 		list_del_init(el);
1260 		spin_unlock(&die_kset->list_lock);
1261 		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
1262 		spin_lock(&die_kset->list_lock);
1263 	}
1264 	spin_unlock(&die_kset->list_lock);
1265 	kobject_put(&adev->ip_top->die_kset.kobj);
1266 	kobject_put(&adev->ip_top->kobj);
1267 }
1268 
1269 /* ================================================== */
1270 
1271 static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
1272 {
1273 	uint8_t num_base_address, subrev, variant;
1274 	struct binary_header *bhdr;
1275 	struct ip_discovery_header *ihdr;
1276 	struct die_header *dhdr;
1277 	struct ip_v4 *ip;
1278 	uint16_t die_offset;
1279 	uint16_t ip_offset;
1280 	uint16_t num_dies;
1281 	uint16_t num_ips;
1282 	int hw_ip;
1283 	int i, j, k;
1284 	int r;
1285 
1286 	r = amdgpu_discovery_init(adev);
1287 	if (r) {
1288 		DRM_ERROR("amdgpu_discovery_init failed\n");
1289 		return r;
1290 	}
1291 
1292 	adev->gfx.xcc_mask = 0;
1293 	adev->sdma.sdma_mask = 0;
1294 	adev->vcn.inst_mask = 0;
1295 	adev->jpeg.inst_mask = 0;
1296 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1297 	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1298 			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1299 	num_dies = le16_to_cpu(ihdr->num_dies);
1300 
1301 	DRM_DEBUG("number of dies: %d\n", num_dies);
1302 
1303 	for (i = 0; i < num_dies; i++) {
1304 		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
1305 		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1306 		num_ips = le16_to_cpu(dhdr->num_ips);
1307 		ip_offset = die_offset + sizeof(*dhdr);
1308 
1309 		if (le16_to_cpu(dhdr->die_id) != i) {
1310 			DRM_ERROR("invalid die id %d, expected %d\n",
1311 					le16_to_cpu(dhdr->die_id), i);
1312 			return -EINVAL;
1313 		}
1314 
1315 		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
1316 				le16_to_cpu(dhdr->die_id), num_ips);
1317 
1318 		for (j = 0; j < num_ips; j++) {
1319 			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
1320 
1321 			if (amdgpu_discovery_validate_ip(ip))
1322 				goto next_ip;
1323 
1324 			num_base_address = ip->num_base_address;
1325 
1326 			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
1327 				  hw_id_names[le16_to_cpu(ip->hw_id)],
1328 				  le16_to_cpu(ip->hw_id),
1329 				  ip->instance_number,
1330 				  ip->major, ip->minor,
1331 				  ip->revision);
1332 
1333 			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
1334 				/* Bit [5:0]: original revision value
1335 				 * Bit [7:6]: en/decode capability:
1336 				 *     0b00 : VCN function normally
1337 				 *     0b10 : encode is disabled
1338 				 *     0b01 : decode is disabled
1339 				 */
1340 				if (adev->vcn.num_vcn_inst <
1341 				    AMDGPU_MAX_VCN_INSTANCES) {
1342 					adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
1343 						ip->revision & 0xc0;
1344 					adev->vcn.num_vcn_inst++;
1345 					adev->vcn.inst_mask |=
1346 						(1U << ip->instance_number);
1347 					adev->jpeg.inst_mask |=
1348 						(1U << ip->instance_number);
1349 				} else {
1350 					dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
1351 						adev->vcn.num_vcn_inst + 1,
1352 						AMDGPU_MAX_VCN_INSTANCES);
1353 				}
1354 				ip->revision &= ~0xc0;
1355 			}
1356 			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
1357 			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
1358 			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
1359 			    le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
1360 				if (adev->sdma.num_instances <
1361 				    AMDGPU_MAX_SDMA_INSTANCES) {
1362 					adev->sdma.num_instances++;
1363 					adev->sdma.sdma_mask |=
1364 						(1U << ip->instance_number);
1365 				} else {
1366 					dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
1367 						adev->sdma.num_instances + 1,
1368 						AMDGPU_MAX_SDMA_INSTANCES);
1369 				}
1370 			}
1371 
1372 			if (le16_to_cpu(ip->hw_id) == VPE_HWID) {
1373 				if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES)
1374 					adev->vpe.num_instances++;
1375 				else
1376 					dev_err(adev->dev, "Too many VPE instances: %d vs %d\n",
1377 						adev->vpe.num_instances + 1,
1378 						AMDGPU_MAX_VPE_INSTANCES);
1379 			}
1380 
1381 			if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
1382 				adev->gmc.num_umc++;
1383 				adev->umc.node_inst_num++;
1384 			}
1385 
1386 			if (le16_to_cpu(ip->hw_id) == GC_HWID)
1387 				adev->gfx.xcc_mask |=
1388 					(1U << ip->instance_number);
1389 
1390 			for (k = 0; k < num_base_address; k++) {
1391 				/*
1392 				 * convert the endianness of base addresses in place,
1393 				 * so that we don't need to convert them when accessing adev->reg_offset.
1394 				 */
1395 				if (ihdr->base_addr_64_bit)
1396 					/* Truncate the 64bit base address from ip discovery
1397 					 * and only store lower 32bit ip base in reg_offset[].
1398 					 * Bits > 32 follows ASIC specific format, thus just
1399 					 * discard them and handle it within specific ASIC.
1400 					 * By this way reg_offset[] and related helpers can
1401 					 * stay unchanged.
1402 					 * The base address is in dwords, thus clear the
1403 					 * highest 2 bits to store.
1404 					 */
1405 					ip->base_address[k] =
1406 						lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
1407 				else
1408 					ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
1409 				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
1410 			}
1411 
1412 			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
1413 				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
1414 				    hw_id_map[hw_ip] != 0) {
1415 					DRM_DEBUG("set register base offset for %s\n",
1416 							hw_id_names[le16_to_cpu(ip->hw_id)]);
1417 					adev->reg_offset[hw_ip][ip->instance_number] =
1418 						ip->base_address;
1419 					/* Instance support is somewhat inconsistent.
1420 					 * SDMA is a good example.  Sienna cichlid has 4 total
1421 					 * SDMA instances, each enumerated separately (HWIDs
1422 					 * 42, 43, 68, 69).  Arcturus has 8 total SDMA instances,
1423 					 * but they are enumerated as multiple instances of the
1424 					 * same HWIDs (4x HWID 42, 4x HWID 43).  UMC is another
1425 					 * example.  On most chips there are multiple instances
1426 					 * with the same HWID.
1427 					 */
1428 
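					/* variant and sub_revision are only meaningful
					 * for discovery table version 3 and newer;
					 * older tables report 0 here.
					 */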
1429 					if (ihdr->version < 3) {
1430 						subrev = 0;
1431 						variant = 0;
1432 					} else {
1433 						subrev = ip->sub_revision;
1434 						variant = ip->variant;
1435 					}
1436 
1437 					adev->ip_versions[hw_ip]
1438 							 [ip->instance_number] =
1439 						IP_VERSION_FULL(ip->major,
1440 								ip->minor,
1441 								ip->revision,
1442 								variant,
1443 								subrev);
1444 				}
1445 			}
1446 
1447 next_ip:
1448 			if (ihdr->base_addr_64_bit)
1449 				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
1450 			else
1451 				ip_offset += struct_size(ip, base_address, ip->num_base_address);
1452 		}
1453 	}
1454 
1455 	return 0;
1456 }
1457 
1458 static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
1459 {
1460 	int vcn_harvest_count = 0;
1461 	int umc_harvest_count = 0;
1462 
1463 	/*
	 * The harvest table is not available on Navi1x and legacy GPUs,
	 * so read the harvest bit of each IP data structure to set the
	 * harvest configuration.
1467 	 */
1468 	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
1469 	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3) &&
1470 	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 4)) {
1471 		if ((adev->pdev->device == 0x731E &&
1472 			(adev->pdev->revision == 0xC6 ||
1473 			 adev->pdev->revision == 0xC7)) ||
1474 			(adev->pdev->device == 0x7340 &&
1475 			 adev->pdev->revision == 0xC9) ||
1476 			(adev->pdev->device == 0x7360 &&
1477 			 adev->pdev->revision == 0xC7))
1478 			amdgpu_discovery_read_harvest_bit_per_ip(adev,
1479 				&vcn_harvest_count);
1480 	} else {
1481 		amdgpu_discovery_read_from_harvest_table(adev,
1482 							 &vcn_harvest_count,
1483 							 &umc_harvest_count);
1484 	}
1485 
1486 	amdgpu_discovery_harvest_config_quirk(adev);
1487 
1488 	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
1489 		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
1490 		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
1491 	}
1492 
	if (umc_harvest_count < adev->gmc.num_umc)
		adev->gmc.num_umc -= umc_harvest_count;
1496 }
1497 
1498 union gc_info {
1499 	struct gc_info_v1_0 v1;
1500 	struct gc_info_v1_1 v1_1;
1501 	struct gc_info_v1_2 v1_2;
1502 	struct gc_info_v2_0 v2;
1503 	struct gc_info_v2_1 v2_1;
1504 };
1505 
1506 static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
1507 {
1508 	struct binary_header *bhdr;
1509 	union gc_info *gc_info;
1510 	u16 offset;
1511 
1512 	if (!adev->mman.discovery_bin) {
1513 		DRM_ERROR("ip discovery uninitialized\n");
1514 		return -EINVAL;
1515 	}
1516 
1517 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1518 	offset = le16_to_cpu(bhdr->table_list[GC].offset);
1519 
1520 	if (!offset)
1521 		return 0;
1522 
1523 	gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
1524 
1525 	switch (le16_to_cpu(gc_info->v1.header.version_major)) {
1526 	case 1:
1527 		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
1528 		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
1529 						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
1530 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1531 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
1532 		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
1533 		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
1534 		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
1535 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
1536 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
1537 		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
1538 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
1539 		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
1540 		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
1541 		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
1542 		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
1543 			le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1544 		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
1545 		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
1546 			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
1547 			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
1548 			adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
1549 		}
1550 		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
1551 			adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
1552 			adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
1553 			adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
1554 			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
1555 			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
1556 			adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
1557 			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
1558 			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
1559 		}
1560 		break;
1561 	case 2:
1562 		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
1563 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
1564 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1565 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
1566 		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
1567 		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
1568 		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
1569 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
1570 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
1571 		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
1572 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
1573 		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
1574 		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
1575 		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
1576 		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
1577 			le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1578 		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
1579 		if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
1580 			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
1581 			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
1582 			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
1583 			adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
1584 			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
1585 			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
1586 			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
1587 		}
1588 		break;
1589 	default:
1590 		dev_err(adev->dev,
1591 			"Unhandled GC info table %d.%d\n",
1592 			le16_to_cpu(gc_info->v1.header.version_major),
1593 			le16_to_cpu(gc_info->v1.header.version_minor));
1594 		return -EINVAL;
1595 	}
1596 	return 0;
1597 }
1598 
1599 union mall_info {
1600 	struct mall_info_v1_0 v1;
1601 	struct mall_info_v2_0 v2;
1602 };
1603 
1604 static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
1605 {
1606 	struct binary_header *bhdr;
1607 	union mall_info *mall_info;
1608 	u32 u, mall_size_per_umc, m_s_present, half_use;
1609 	u64 mall_size;
1610 	u16 offset;
1611 
1612 	if (!adev->mman.discovery_bin) {
1613 		DRM_ERROR("ip discovery uninitialized\n");
1614 		return -EINVAL;
1615 	}
1616 
1617 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1618 	offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
1619 
1620 	if (!offset)
1621 		return 0;
1622 
1623 	mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
1624 
1625 	switch (le16_to_cpu(mall_info->v1.header.version_major)) {
1626 	case 1:
1627 		mall_size = 0;
1628 		mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
1629 		m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
1630 		half_use = le32_to_cpu(mall_info->v1.m_half_use);
1631 		for (u = 0; u < adev->gmc.num_umc; u++) {
1632 			if (m_s_present & (1 << u))
1633 				mall_size += mall_size_per_umc * 2;
1634 			else if (half_use & (1 << u))
1635 				mall_size += mall_size_per_umc / 2;
1636 			else
1637 				mall_size += mall_size_per_umc;
1638 		}
1639 		adev->gmc.mall_size = mall_size;
1640 		adev->gmc.m_half_use = half_use;
1641 		break;
1642 	case 2:
1643 		mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
1644 		adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc;
1645 		break;
1646 	default:
1647 		dev_err(adev->dev,
1648 			"Unhandled MALL info table %d.%d\n",
1649 			le16_to_cpu(mall_info->v1.header.version_major),
1650 			le16_to_cpu(mall_info->v1.header.version_minor));
1651 		return -EINVAL;
1652 	}
1653 	return 0;
1654 }
1655 
1656 union vcn_info {
1657 	struct vcn_info_v1_0 v1;
1658 };
1659 
1660 static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
1661 {
1662 	struct binary_header *bhdr;
1663 	union vcn_info *vcn_info;
1664 	u16 offset;
1665 	int v;
1666 
1667 	if (!adev->mman.discovery_bin) {
1668 		DRM_ERROR("ip discovery uninitialized\n");
1669 		return -EINVAL;
1670 	}
1671 
1672 	/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1673 	 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
1674 	 * but that may change in the future with new GPUs so keep this
1675 	 * check for defensive purposes.
1676 	 */
1677 	if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
1678 		dev_err(adev->dev, "invalid vcn instances\n");
1679 		return -EINVAL;
1680 	}
1681 
1682 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1683 	offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
1684 
1685 	if (!offset)
1686 		return 0;
1687 
1688 	vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
1689 
1690 	switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
1691 	case 1:
1692 		/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
1693 		 * so this won't overflow.
1694 		 */
1695 		for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
1696 			adev->vcn.vcn_codec_disable_mask[v] =
1697 				le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
1698 		}
1699 		break;
1700 	default:
1701 		dev_err(adev->dev,
1702 			"Unhandled VCN info table %d.%d\n",
1703 			le16_to_cpu(vcn_info->v1.header.version_major),
1704 			le16_to_cpu(vcn_info->v1.header.version_minor));
1705 		return -EINVAL;
1706 	}
1707 	return 0;
1708 }
1709 
1710 union nps_info {
1711 	struct nps_info_v1_0 v1;
1712 };
1713 
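/**
 * amdgpu_discovery_get_nps_info - parse the NPS info table from IP discovery
 * @adev: amdgpu device pointer
 * @nps_type: returns the NPS mode reported by the discovery table
 * @ranges: returns a kvcalloc()'d array of memory ranges; the caller is
 *          responsible for freeing it with kvfree()
 * @range_cnt: returns the number of entries in @ranges
 *
 * Returns 0 on success, -ENOENT if the table is absent or fails
 * verification, or another negative error code on failure.
 */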
1714 int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
1715 				  uint32_t *nps_type,
1716 				  struct amdgpu_gmc_memrange **ranges,
1717 				  int *range_cnt)
1718 {
1719 	struct amdgpu_gmc_memrange *mem_ranges;
1720 	struct binary_header *bhdr;
1721 	union nps_info *nps_info;
1722 	u16 offset;
1723 	int i;
1724 
1725 	if (!nps_type || !range_cnt || !ranges)
1726 		return -EINVAL;
1727 
1728 	if (!adev->mman.discovery_bin) {
1729 		dev_err(adev->dev,
1730 			"fetch mem range failed, ip discovery uninitialized\n");
1731 		return -EINVAL;
1732 	}
1733 
1734 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
1735 	offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);
1736 
1737 	if (!offset)
1738 		return -ENOENT;
1739 
1740 	/* If verification fails, return as if NPS table doesn't exist */
1741 	if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
1742 		return -ENOENT;
1743 
1744 	nps_info = (union nps_info *)(adev->mman.discovery_bin + offset);
1745 
1746 	switch (le16_to_cpu(nps_info->v1.header.version_major)) {
1747 	case 1:
1748 		*nps_type = nps_info->v1.nps_type;
1749 		*range_cnt = nps_info->v1.count;
		mem_ranges = kvcalloc(*range_cnt,
				      sizeof(struct amdgpu_gmc_memrange),
				      GFP_KERNEL);
		if (!mem_ranges)
			return -ENOMEM;
1753 		for (i = 0; i < *range_cnt; i++) {
1754 			mem_ranges[i].base_address =
1755 				nps_info->v1.instance_info[i].base_address;
1756 			mem_ranges[i].limit_address =
1757 				nps_info->v1.instance_info[i].limit_address;
1758 			mem_ranges[i].nid_mask = -1;
1759 			mem_ranges[i].flags = 0;
1760 		}
1761 		*ranges = mem_ranges;
1762 		break;
1763 	default:
1764 		dev_err(adev->dev, "Unhandled NPS info table %d.%d\n",
1765 			le16_to_cpu(nps_info->v1.header.version_major),
1766 			le16_to_cpu(nps_info->v1.header.version_minor));
1767 		return -EINVAL;
1768 	}
1769 
1770 	return 0;
1771 }
1772 
1773 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
1774 {
	/* use the GC IP version to select the SOC-level common IP block */
1776 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1777 	case IP_VERSION(9, 0, 1):
1778 	case IP_VERSION(9, 1, 0):
1779 	case IP_VERSION(9, 2, 1):
1780 	case IP_VERSION(9, 2, 2):
1781 	case IP_VERSION(9, 3, 0):
1782 	case IP_VERSION(9, 4, 0):
1783 	case IP_VERSION(9, 4, 1):
1784 	case IP_VERSION(9, 4, 2):
1785 	case IP_VERSION(9, 4, 3):
1786 	case IP_VERSION(9, 4, 4):
1787 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
1788 		break;
1789 	case IP_VERSION(10, 1, 10):
1790 	case IP_VERSION(10, 1, 1):
1791 	case IP_VERSION(10, 1, 2):
1792 	case IP_VERSION(10, 1, 3):
1793 	case IP_VERSION(10, 1, 4):
1794 	case IP_VERSION(10, 3, 0):
1795 	case IP_VERSION(10, 3, 1):
1796 	case IP_VERSION(10, 3, 2):
1797 	case IP_VERSION(10, 3, 3):
1798 	case IP_VERSION(10, 3, 4):
1799 	case IP_VERSION(10, 3, 5):
1800 	case IP_VERSION(10, 3, 6):
1801 	case IP_VERSION(10, 3, 7):
1802 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
1803 		break;
1804 	case IP_VERSION(11, 0, 0):
1805 	case IP_VERSION(11, 0, 1):
1806 	case IP_VERSION(11, 0, 2):
1807 	case IP_VERSION(11, 0, 3):
1808 	case IP_VERSION(11, 0, 4):
1809 	case IP_VERSION(11, 5, 0):
1810 	case IP_VERSION(11, 5, 1):
1811 		amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
1812 		break;
1813 	case IP_VERSION(12, 0, 0):
1814 	case IP_VERSION(12, 0, 1):
1815 		amdgpu_device_ip_block_add(adev, &soc24_common_ip_block);
1816 		break;
1817 	default:
1818 		dev_err(adev->dev,
1819 			"Failed to add common ip block(GC_HWIP:0x%x)\n",
1820 			amdgpu_ip_version(adev, GC_HWIP, 0));
1821 		return -EINVAL;
1822 	}
1823 	return 0;
1824 }
1825 
1826 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
1827 {
1828 	/* use GC or MMHUB IP version */
1829 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1830 	case IP_VERSION(9, 0, 1):
1831 	case IP_VERSION(9, 1, 0):
1832 	case IP_VERSION(9, 2, 1):
1833 	case IP_VERSION(9, 2, 2):
1834 	case IP_VERSION(9, 3, 0):
1835 	case IP_VERSION(9, 4, 0):
1836 	case IP_VERSION(9, 4, 1):
1837 	case IP_VERSION(9, 4, 2):
1838 	case IP_VERSION(9, 4, 3):
1839 	case IP_VERSION(9, 4, 4):
1840 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
1841 		break;
1842 	case IP_VERSION(10, 1, 10):
1843 	case IP_VERSION(10, 1, 1):
1844 	case IP_VERSION(10, 1, 2):
1845 	case IP_VERSION(10, 1, 3):
1846 	case IP_VERSION(10, 1, 4):
1847 	case IP_VERSION(10, 3, 0):
1848 	case IP_VERSION(10, 3, 1):
1849 	case IP_VERSION(10, 3, 2):
1850 	case IP_VERSION(10, 3, 3):
1851 	case IP_VERSION(10, 3, 4):
1852 	case IP_VERSION(10, 3, 5):
1853 	case IP_VERSION(10, 3, 6):
1854 	case IP_VERSION(10, 3, 7):
1855 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
1856 		break;
1857 	case IP_VERSION(11, 0, 0):
1858 	case IP_VERSION(11, 0, 1):
1859 	case IP_VERSION(11, 0, 2):
1860 	case IP_VERSION(11, 0, 3):
1861 	case IP_VERSION(11, 0, 4):
1862 	case IP_VERSION(11, 5, 0):
1863 	case IP_VERSION(11, 5, 1):
1864 		amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
1865 		break;
1866 	case IP_VERSION(12, 0, 0):
1867 	case IP_VERSION(12, 0, 1):
1868 		amdgpu_device_ip_block_add(adev, &gmc_v12_0_ip_block);
1869 		break;
1870 	default:
1871 		dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
1872 			amdgpu_ip_version(adev, GC_HWIP, 0));
1873 		return -EINVAL;
1874 	}
1875 	return 0;
1876 }
1877 
1878 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
1879 {
1880 	switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
1881 	case IP_VERSION(4, 0, 0):
1882 	case IP_VERSION(4, 0, 1):
1883 	case IP_VERSION(4, 1, 0):
1884 	case IP_VERSION(4, 1, 1):
1885 	case IP_VERSION(4, 3, 0):
1886 		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
1887 		break;
1888 	case IP_VERSION(4, 2, 0):
1889 	case IP_VERSION(4, 2, 1):
1890 	case IP_VERSION(4, 4, 0):
1891 	case IP_VERSION(4, 4, 2):
1892 	case IP_VERSION(4, 4, 5):
1893 		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
1894 		break;
1895 	case IP_VERSION(5, 0, 0):
1896 	case IP_VERSION(5, 0, 1):
1897 	case IP_VERSION(5, 0, 2):
1898 	case IP_VERSION(5, 0, 3):
1899 	case IP_VERSION(5, 2, 0):
1900 	case IP_VERSION(5, 2, 1):
1901 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
1902 		break;
1903 	case IP_VERSION(6, 0, 0):
1904 	case IP_VERSION(6, 0, 1):
1905 	case IP_VERSION(6, 0, 2):
1906 		amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
1907 		break;
1908 	case IP_VERSION(6, 1, 0):
1909 		amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
1910 		break;
1911 	case IP_VERSION(7, 0, 0):
1912 		amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
1913 		break;
1914 	default:
1915 		dev_err(adev->dev,
1916 			"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
1917 			amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
1918 		return -EINVAL;
1919 	}
1920 	return 0;
1921 }
1922 
1923 static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
1924 {
1925 	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
1926 	case IP_VERSION(9, 0, 0):
1927 		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
1928 		break;
1929 	case IP_VERSION(10, 0, 0):
1930 	case IP_VERSION(10, 0, 1):
1931 		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
1932 		break;
1933 	case IP_VERSION(11, 0, 0):
1934 	case IP_VERSION(11, 0, 2):
1935 	case IP_VERSION(11, 0, 4):
1936 	case IP_VERSION(11, 0, 5):
1937 	case IP_VERSION(11, 0, 9):
1938 	case IP_VERSION(11, 0, 7):
1939 	case IP_VERSION(11, 0, 11):
1940 	case IP_VERSION(11, 0, 12):
1941 	case IP_VERSION(11, 0, 13):
1942 	case IP_VERSION(11, 5, 0):
1943 		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
1944 		break;
1945 	case IP_VERSION(11, 0, 8):
1946 		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
1947 		break;
1948 	case IP_VERSION(11, 0, 3):
1949 	case IP_VERSION(12, 0, 1):
1950 		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
1951 		break;
1952 	case IP_VERSION(13, 0, 0):
1953 	case IP_VERSION(13, 0, 1):
1954 	case IP_VERSION(13, 0, 2):
1955 	case IP_VERSION(13, 0, 3):
1956 	case IP_VERSION(13, 0, 5):
1957 	case IP_VERSION(13, 0, 6):
1958 	case IP_VERSION(13, 0, 7):
1959 	case IP_VERSION(13, 0, 8):
1960 	case IP_VERSION(13, 0, 10):
1961 	case IP_VERSION(13, 0, 11):
1962 	case IP_VERSION(13, 0, 14):
1963 	case IP_VERSION(14, 0, 0):
1964 	case IP_VERSION(14, 0, 1):
1965 		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
1966 		break;
1967 	case IP_VERSION(13, 0, 4):
1968 		amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
1969 		break;
1970 	case IP_VERSION(14, 0, 2):
1971 	case IP_VERSION(14, 0, 3):
1972 		amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
1973 		break;
1974 	default:
1975 		dev_err(adev->dev,
1976 			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
1977 			amdgpu_ip_version(adev, MP0_HWIP, 0));
1978 		return -EINVAL;
1979 	}
1980 	return 0;
1981 }
1982 
1983 static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
1984 {
1985 	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1986 	case IP_VERSION(9, 0, 0):
1987 	case IP_VERSION(10, 0, 0):
1988 	case IP_VERSION(10, 0, 1):
1989 	case IP_VERSION(11, 0, 2):
1990 		if (adev->asic_type == CHIP_ARCTURUS)
1991 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
1992 		else
1993 			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1994 		break;
1995 	case IP_VERSION(11, 0, 0):
1996 	case IP_VERSION(11, 0, 5):
1997 	case IP_VERSION(11, 0, 9):
1998 	case IP_VERSION(11, 0, 7):
1999 	case IP_VERSION(11, 0, 8):
2000 	case IP_VERSION(11, 0, 11):
2001 	case IP_VERSION(11, 0, 12):
2002 	case IP_VERSION(11, 0, 13):
2003 	case IP_VERSION(11, 5, 0):
2004 		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
2005 		break;
2006 	case IP_VERSION(12, 0, 0):
2007 	case IP_VERSION(12, 0, 1):
2008 		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
2009 		break;
2010 	case IP_VERSION(13, 0, 0):
2011 	case IP_VERSION(13, 0, 1):
2012 	case IP_VERSION(13, 0, 2):
2013 	case IP_VERSION(13, 0, 3):
2014 	case IP_VERSION(13, 0, 4):
2015 	case IP_VERSION(13, 0, 5):
2016 	case IP_VERSION(13, 0, 6):
2017 	case IP_VERSION(13, 0, 7):
2018 	case IP_VERSION(13, 0, 8):
2019 	case IP_VERSION(13, 0, 10):
2020 	case IP_VERSION(13, 0, 11):
2021 	case IP_VERSION(13, 0, 14):
2022 		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
2023 		break;
2024 	case IP_VERSION(14, 0, 0):
2025 	case IP_VERSION(14, 0, 1):
2026 	case IP_VERSION(14, 0, 2):
2027 	case IP_VERSION(14, 0, 3):
2028 		amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
2029 		break;
2030 	default:
2031 		dev_err(adev->dev,
2032 			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
2033 			amdgpu_ip_version(adev, MP1_HWIP, 0));
2034 		return -EINVAL;
2035 	}
2036 	return 0;
2037 }
2038 
2039 #if defined(CONFIG_DRM_AMD_DC)
2040 static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
2041 {
2042 	amdgpu_device_set_sriov_virtual_display(adev);
2043 	amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2044 }
2045 #endif
2046 
2047 static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
2048 {
2049 	if (adev->enable_virtual_display) {
2050 		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2051 		return 0;
2052 	}
2053 
2054 	if (!amdgpu_device_has_dc_support(adev))
2055 		return 0;
2056 
2057 #if defined(CONFIG_DRM_AMD_DC)
2058 	if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2059 		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2060 		case IP_VERSION(1, 0, 0):
2061 		case IP_VERSION(1, 0, 1):
2062 		case IP_VERSION(2, 0, 2):
2063 		case IP_VERSION(2, 0, 0):
2064 		case IP_VERSION(2, 0, 3):
2065 		case IP_VERSION(2, 1, 0):
2066 		case IP_VERSION(3, 0, 0):
2067 		case IP_VERSION(3, 0, 2):
2068 		case IP_VERSION(3, 0, 3):
2069 		case IP_VERSION(3, 0, 1):
2070 		case IP_VERSION(3, 1, 2):
2071 		case IP_VERSION(3, 1, 3):
2072 		case IP_VERSION(3, 1, 4):
2073 		case IP_VERSION(3, 1, 5):
2074 		case IP_VERSION(3, 1, 6):
2075 		case IP_VERSION(3, 2, 0):
2076 		case IP_VERSION(3, 2, 1):
2077 		case IP_VERSION(3, 5, 0):
2078 		case IP_VERSION(3, 5, 1):
2079 		case IP_VERSION(4, 1, 0):
2080 			/* TODO: Fix IP version. DC code expects version 4.0.1 */
2081 			if (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(4, 1, 0))
2082 				adev->ip_versions[DCE_HWIP][0] = IP_VERSION(4, 0, 1);
2083 
2084 			if (amdgpu_sriov_vf(adev))
2085 				amdgpu_discovery_set_sriov_display(adev);
2086 			else
2087 				amdgpu_device_ip_block_add(adev, &dm_ip_block);
2088 			break;
2089 		default:
2090 			dev_err(adev->dev,
2091 				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
2092 				amdgpu_ip_version(adev, DCE_HWIP, 0));
2093 			return -EINVAL;
2094 		}
2095 	} else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
2096 		switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
2097 		case IP_VERSION(12, 0, 0):
2098 		case IP_VERSION(12, 0, 1):
2099 		case IP_VERSION(12, 1, 0):
2100 			if (amdgpu_sriov_vf(adev))
2101 				amdgpu_discovery_set_sriov_display(adev);
2102 			else
2103 				amdgpu_device_ip_block_add(adev, &dm_ip_block);
2104 			break;
2105 		default:
2106 			dev_err(adev->dev,
2107 				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
2108 				amdgpu_ip_version(adev, DCI_HWIP, 0));
2109 			return -EINVAL;
2110 		}
2111 	}
2112 #endif
2113 	return 0;
2114 }
2115 
2116 static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
2117 {
2118 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2119 	case IP_VERSION(9, 0, 1):
2120 	case IP_VERSION(9, 1, 0):
2121 	case IP_VERSION(9, 2, 1):
2122 	case IP_VERSION(9, 2, 2):
2123 	case IP_VERSION(9, 3, 0):
2124 	case IP_VERSION(9, 4, 0):
2125 	case IP_VERSION(9, 4, 1):
2126 	case IP_VERSION(9, 4, 2):
2127 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
2128 		break;
2129 	case IP_VERSION(9, 4, 3):
2130 	case IP_VERSION(9, 4, 4):
2131 		amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
2132 		break;
2133 	case IP_VERSION(10, 1, 10):
2134 	case IP_VERSION(10, 1, 2):
2135 	case IP_VERSION(10, 1, 1):
2136 	case IP_VERSION(10, 1, 3):
2137 	case IP_VERSION(10, 1, 4):
2138 	case IP_VERSION(10, 3, 0):
2139 	case IP_VERSION(10, 3, 2):
2140 	case IP_VERSION(10, 3, 1):
2141 	case IP_VERSION(10, 3, 4):
2142 	case IP_VERSION(10, 3, 5):
2143 	case IP_VERSION(10, 3, 6):
2144 	case IP_VERSION(10, 3, 3):
2145 	case IP_VERSION(10, 3, 7):
2146 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
2147 		break;
2148 	case IP_VERSION(11, 0, 0):
2149 	case IP_VERSION(11, 0, 1):
2150 	case IP_VERSION(11, 0, 2):
2151 	case IP_VERSION(11, 0, 3):
2152 	case IP_VERSION(11, 0, 4):
2153 	case IP_VERSION(11, 5, 0):
2154 	case IP_VERSION(11, 5, 1):
2155 		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
2156 		break;
2157 	case IP_VERSION(12, 0, 0):
2158 	case IP_VERSION(12, 0, 1):
2159 		if (!amdgpu_exp_hw_support)
2160 			return -EINVAL;
2161 		amdgpu_device_ip_block_add(adev, &gfx_v12_0_ip_block);
2162 		break;
2163 	default:
2164 		dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
2165 			amdgpu_ip_version(adev, GC_HWIP, 0));
2166 		return -EINVAL;
2167 	}
2168 	return 0;
2169 }
2170 
2171 static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
2172 {
2173 	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
2174 	case IP_VERSION(4, 0, 0):
2175 	case IP_VERSION(4, 0, 1):
2176 	case IP_VERSION(4, 1, 0):
2177 	case IP_VERSION(4, 1, 1):
2178 	case IP_VERSION(4, 1, 2):
2179 	case IP_VERSION(4, 2, 0):
2180 	case IP_VERSION(4, 2, 2):
2181 	case IP_VERSION(4, 4, 0):
2182 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
2183 		break;
2184 	case IP_VERSION(4, 4, 2):
2185 	case IP_VERSION(4, 4, 5):
2186 		amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
2187 		break;
2188 	case IP_VERSION(5, 0, 0):
2189 	case IP_VERSION(5, 0, 1):
2190 	case IP_VERSION(5, 0, 2):
2191 	case IP_VERSION(5, 0, 5):
2192 		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
2193 		break;
2194 	case IP_VERSION(5, 2, 0):
2195 	case IP_VERSION(5, 2, 2):
2196 	case IP_VERSION(5, 2, 4):
2197 	case IP_VERSION(5, 2, 5):
2198 	case IP_VERSION(5, 2, 6):
2199 	case IP_VERSION(5, 2, 3):
2200 	case IP_VERSION(5, 2, 1):
2201 	case IP_VERSION(5, 2, 7):
2202 		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
2203 		break;
2204 	case IP_VERSION(6, 0, 0):
2205 	case IP_VERSION(6, 0, 1):
2206 	case IP_VERSION(6, 0, 2):
2207 	case IP_VERSION(6, 0, 3):
2208 	case IP_VERSION(6, 1, 0):
2209 	case IP_VERSION(6, 1, 1):
2210 		amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
2211 		break;
2212 	case IP_VERSION(7, 0, 0):
2213 	case IP_VERSION(7, 0, 1):
2214 		amdgpu_device_ip_block_add(adev, &sdma_v7_0_ip_block);
2215 		break;
2216 	default:
2217 		dev_err(adev->dev,
2218 			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
2219 			amdgpu_ip_version(adev, SDMA0_HWIP, 0));
2220 		return -EINVAL;
2221 	}
2222 	return 0;
2223 }
2224 
2225 static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
2226 {
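	/* ASICs that expose a VCE IP use the legacy UVD/VCE pairing; all
	 * others use VCN (and JPEG where applicable)
	 */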
2227 	if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2228 		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2229 		case IP_VERSION(7, 0, 0):
2230 		case IP_VERSION(7, 2, 0):
2231 			/* UVD is not supported on vega20 SR-IOV */
2232 			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2233 				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
2234 			break;
2235 		default:
2236 			dev_err(adev->dev,
2237 				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
2238 				amdgpu_ip_version(adev, UVD_HWIP, 0));
2239 			return -EINVAL;
2240 		}
2241 		switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
2242 		case IP_VERSION(4, 0, 0):
2243 		case IP_VERSION(4, 1, 0):
2244 			/* VCE is not supported on vega20 SR-IOV */
2245 			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
2246 				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
2247 			break;
2248 		default:
2249 			dev_err(adev->dev,
2250 				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
2251 				amdgpu_ip_version(adev, VCE_HWIP, 0));
2252 			return -EINVAL;
2253 		}
2254 	} else {
2255 		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
2256 		case IP_VERSION(1, 0, 0):
2257 		case IP_VERSION(1, 0, 1):
2258 			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
2259 			break;
2260 		case IP_VERSION(2, 0, 0):
2261 		case IP_VERSION(2, 0, 2):
2262 		case IP_VERSION(2, 2, 0):
2263 			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
2264 			if (!amdgpu_sriov_vf(adev))
2265 				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
2266 			break;
2267 		case IP_VERSION(2, 0, 3):
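			/* no VCN/JPEG IP blocks are added for VCN 2.0.3 */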
2268 			break;
2269 		case IP_VERSION(2, 5, 0):
2270 			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
2271 			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
2272 			break;
2273 		case IP_VERSION(2, 6, 0):
2274 			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
2275 			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
2276 			break;
2277 		case IP_VERSION(3, 0, 0):
2278 		case IP_VERSION(3, 0, 16):
2279 		case IP_VERSION(3, 1, 1):
2280 		case IP_VERSION(3, 1, 2):
2281 		case IP_VERSION(3, 0, 2):
2282 			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2283 			if (!amdgpu_sriov_vf(adev))
2284 				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
2285 			break;
2286 		case IP_VERSION(3, 0, 33):
2287 			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
2288 			break;
2289 		case IP_VERSION(4, 0, 0):
2290 		case IP_VERSION(4, 0, 2):
2291 		case IP_VERSION(4, 0, 4):
2292 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
2293 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
2294 			break;
2295 		case IP_VERSION(4, 0, 3):
2296 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
2297 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
2298 			break;
2299 		case IP_VERSION(4, 0, 5):
2300 		case IP_VERSION(4, 0, 6):
2301 			amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
2302 			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
2303 			break;
2304 		case IP_VERSION(5, 0, 0):
2305 			amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
2306 			amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
2307 			break;
2308 		default:
2309 			dev_err(adev->dev,
2310 				"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
2311 				amdgpu_ip_version(adev, UVD_HWIP, 0));
2312 			return -EINVAL;
2313 		}
2314 	}
2315 	return 0;
2316 }
2317 
2318 static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
2319 {
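	/* MES IP blocks are only added for GC 11.x and 12.x; earlier GC
	 * versions fall through to the default case and are skipped
	 */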
2320 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2321 	case IP_VERSION(11, 0, 0):
2322 	case IP_VERSION(11, 0, 1):
2323 	case IP_VERSION(11, 0, 2):
2324 	case IP_VERSION(11, 0, 3):
2325 	case IP_VERSION(11, 0, 4):
2326 	case IP_VERSION(11, 5, 0):
2327 	case IP_VERSION(11, 5, 1):
2328 		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
2329 		adev->enable_mes = true;
2330 		adev->enable_mes_kiq = true;
2331 		break;
2332 	case IP_VERSION(12, 0, 0):
2333 	case IP_VERSION(12, 0, 1):
2334 		amdgpu_device_ip_block_add(adev, &mes_v12_0_ip_block);
2335 		adev->enable_mes = true;
2336 		adev->enable_mes_kiq = true;
2337 		if (amdgpu_uni_mes)
2338 			adev->enable_uni_mes = true;
2339 		break;
2340 	default:
2341 		break;
2342 	}
2343 	return 0;
2344 }
2345 
2346 static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
2347 {
2348 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2349 	case IP_VERSION(9, 4, 3):
2350 	case IP_VERSION(9, 4, 4):
2351 		aqua_vanjaram_init_soc_config(adev);
2352 		break;
2353 	default:
2354 		break;
2355 	}
2356 }
2357 
2358 static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
2359 {
2360 	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
2361 	case IP_VERSION(6, 1, 0):
2362 	case IP_VERSION(6, 1, 1):
2363 		amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
2364 		break;
2365 	default:
2366 		break;
2367 	}
2368 
2369 	return 0;
2370 }
2371 
2372 static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
2373 {
2374 	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
2375 	case IP_VERSION(4, 0, 5):
2376 	case IP_VERSION(4, 0, 6):
2377 		if (amdgpu_umsch_mm & 0x1) {
2378 			amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
2379 			adev->enable_umsch_mm = true;
2380 		}
2381 		break;
2382 	default:
2383 		break;
2384 	}
2385 
2386 	return 0;
2387 }
2388 
2389 static int amdgpu_discovery_set_isp_ip_blocks(struct amdgpu_device *adev)
2390 {
2391 #if defined(CONFIG_DRM_AMD_ISP)
2392 	switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) {
2393 	case IP_VERSION(4, 1, 0):
2394 		amdgpu_device_ip_block_add(adev, &isp_v4_1_0_ip_block);
2395 		break;
2396 	case IP_VERSION(4, 1, 1):
2397 		amdgpu_device_ip_block_add(adev, &isp_v4_1_1_ip_block);
2398 		break;
2399 	default:
2400 		break;
2401 	}
2402 #endif
2403 
2404 	return 0;
2405 }
2406 
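/**
 * amdgpu_discovery_set_ip_blocks - populate the IP block list for a device
 * @adev: amdgpu device pointer
 *
 * For pre-discovery ASICs (Vega10 through Aldebaran) the IP versions are
 * hardcoded; for everything else they are parsed from the IP discovery
 * binary. The per-IP callbacks (NBIO, HDP, DF, SMUIO, LSDMA) are then
 * selected and the individual IP blocks are registered.
 *
 * Returns 0 on success or a negative error code on failure.
 */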
2407 int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
2408 {
2409 	int r;
2410 
2411 	switch (adev->asic_type) {
2412 	case CHIP_VEGA10:
2413 		vega10_reg_base_init(adev);
2414 		adev->sdma.num_instances = 2;
2415 		adev->gmc.num_umc = 4;
2416 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2417 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
2418 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
2419 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
2420 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
2421 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
2422 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2423 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
2424 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
2425 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2426 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2427 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2428 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
2429 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
2430 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2431 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2432 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
2433 		break;
2434 	case CHIP_VEGA12:
2435 		vega10_reg_base_init(adev);
2436 		adev->sdma.num_instances = 2;
2437 		adev->gmc.num_umc = 4;
2438 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2439 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
2440 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
2441 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
2442 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
2443 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
2444 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
2445 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
2446 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
2447 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
2448 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
2449 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
2450 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
2451 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
2452 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
2453 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
2454 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
2455 		break;
2456 	case CHIP_RAVEN:
2457 		vega10_reg_base_init(adev);
2458 		adev->sdma.num_instances = 1;
2459 		adev->vcn.num_vcn_inst = 1;
2460 		adev->gmc.num_umc = 2;
2461 		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
2462 			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2463 			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
2464 			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
2465 			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
2466 			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
2467 			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
2468 			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
2469 			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
2470 			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
2471 			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
2472 			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
2473 			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
2474 			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
2475 			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
2476 			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
2477 		} else {
2478 			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2479 			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
2480 			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
2481 			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
2482 			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
2483 			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
2484 			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
2485 			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
2486 			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
2487 			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
2488 			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
2489 			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
2490 			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
2491 			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
2492 			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
2493 		}
2494 		break;
2495 	case CHIP_VEGA20:
2496 		vega20_reg_base_init(adev);
2497 		adev->sdma.num_instances = 2;
2498 		adev->gmc.num_umc = 8;
2499 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2500 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
2501 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
2502 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
2503 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
2504 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
2505 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
2506 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
2507 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
2508 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
2509 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2510 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
2511 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
2512 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
2513 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
2514 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
2515 		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
2516 		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
2517 		break;
2518 	case CHIP_ARCTURUS:
2519 		arct_reg_base_init(adev);
2520 		adev->sdma.num_instances = 8;
2521 		adev->vcn.num_vcn_inst = 2;
2522 		adev->gmc.num_umc = 8;
2523 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2524 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
2525 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
2526 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
2527 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
2528 		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
2529 		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
2530 		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
2531 		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
2532 		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
2533 		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
2534 		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
2535 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
2536 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
2537 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
2538 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
2539 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
2540 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
2541 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
2542 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
2543 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
2544 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
2545 		break;
2546 	case CHIP_ALDEBARAN:
2547 		aldebaran_reg_base_init(adev);
2548 		adev->sdma.num_instances = 5;
2549 		adev->vcn.num_vcn_inst = 2;
2550 		adev->gmc.num_umc = 4;
2551 		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2552 		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
2553 		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
2554 		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
2555 		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
2556 		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
2557 		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
2558 		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
2559 		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
2560 		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
2561 		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
2562 		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
2563 		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
2564 		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
2565 		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
2566 		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
2567 		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
2568 		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
2569 		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
2570 		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
2571 		break;
2572 	default:
2573 		r = amdgpu_discovery_reg_base_init(adev);
2574 		if (r)
2575 			return -EINVAL;
2576 
2577 		amdgpu_discovery_harvest_ip(adev);
2578 		amdgpu_discovery_get_gfx_info(adev);
2579 		amdgpu_discovery_get_mall_info(adev);
2580 		amdgpu_discovery_get_vcn_info(adev);
2581 		break;
2582 	}
2583 
2584 	amdgpu_discovery_init_soc_config(adev);
2585 	amdgpu_discovery_sysfs_init(adev);
2586 
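	/* map the GC IP version to a chip family */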
2587 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2588 	case IP_VERSION(9, 0, 1):
2589 	case IP_VERSION(9, 2, 1):
2590 	case IP_VERSION(9, 4, 0):
2591 	case IP_VERSION(9, 4, 1):
2592 	case IP_VERSION(9, 4, 2):
2593 	case IP_VERSION(9, 4, 3):
2594 	case IP_VERSION(9, 4, 4):
2595 		adev->family = AMDGPU_FAMILY_AI;
2596 		break;
2597 	case IP_VERSION(9, 1, 0):
2598 	case IP_VERSION(9, 2, 2):
2599 	case IP_VERSION(9, 3, 0):
2600 		adev->family = AMDGPU_FAMILY_RV;
2601 		break;
2602 	case IP_VERSION(10, 1, 10):
2603 	case IP_VERSION(10, 1, 1):
2604 	case IP_VERSION(10, 1, 2):
2605 	case IP_VERSION(10, 1, 3):
2606 	case IP_VERSION(10, 1, 4):
2607 	case IP_VERSION(10, 3, 0):
2608 	case IP_VERSION(10, 3, 2):
2609 	case IP_VERSION(10, 3, 4):
2610 	case IP_VERSION(10, 3, 5):
2611 		adev->family = AMDGPU_FAMILY_NV;
2612 		break;
2613 	case IP_VERSION(10, 3, 1):
2614 		adev->family = AMDGPU_FAMILY_VGH;
2615 		adev->apu_flags |= AMD_APU_IS_VANGOGH;
2616 		break;
2617 	case IP_VERSION(10, 3, 3):
2618 		adev->family = AMDGPU_FAMILY_YC;
2619 		break;
2620 	case IP_VERSION(10, 3, 6):
2621 		adev->family = AMDGPU_FAMILY_GC_10_3_6;
2622 		break;
2623 	case IP_VERSION(10, 3, 7):
2624 		adev->family = AMDGPU_FAMILY_GC_10_3_7;
2625 		break;
2626 	case IP_VERSION(11, 0, 0):
2627 	case IP_VERSION(11, 0, 2):
2628 	case IP_VERSION(11, 0, 3):
2629 		adev->family = AMDGPU_FAMILY_GC_11_0_0;
2630 		break;
2631 	case IP_VERSION(11, 0, 1):
2632 	case IP_VERSION(11, 0, 4):
2633 		adev->family = AMDGPU_FAMILY_GC_11_0_1;
2634 		break;
2635 	case IP_VERSION(11, 5, 0):
2636 	case IP_VERSION(11, 5, 1):
2637 		adev->family = AMDGPU_FAMILY_GC_11_5_0;
2638 		break;
2639 	case IP_VERSION(12, 0, 0):
2640 	case IP_VERSION(12, 0, 1):
2641 		adev->family = AMDGPU_FAMILY_GC_12_0_0;
2642 		break;
2643 	default:
2644 		return -EINVAL;
2645 	}
2646 
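	/* flag APU parts based on the GC IP version */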
2647 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2648 	case IP_VERSION(9, 1, 0):
2649 	case IP_VERSION(9, 2, 2):
2650 	case IP_VERSION(9, 3, 0):
2651 	case IP_VERSION(10, 1, 3):
2652 	case IP_VERSION(10, 1, 4):
2653 	case IP_VERSION(10, 3, 1):
2654 	case IP_VERSION(10, 3, 3):
2655 	case IP_VERSION(10, 3, 6):
2656 	case IP_VERSION(10, 3, 7):
2657 	case IP_VERSION(11, 0, 1):
2658 	case IP_VERSION(11, 0, 4):
2659 	case IP_VERSION(11, 5, 0):
2660 	case IP_VERSION(11, 5, 1):
2661 		adev->flags |= AMD_IS_APU;
2662 		break;
2663 	default:
2664 		break;
2665 	}
2666 
2667 	if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0))
2668 		adev->gmc.xgmi.supported = true;
2669 
2670 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
2671 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
2672 		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0);
2673 
2674 	/* set NBIO version */
2675 	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
2676 	case IP_VERSION(6, 1, 0):
2677 	case IP_VERSION(6, 2, 0):
2678 		adev->nbio.funcs = &nbio_v6_1_funcs;
2679 		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
2680 		break;
2681 	case IP_VERSION(7, 0, 0):
2682 	case IP_VERSION(7, 0, 1):
2683 	case IP_VERSION(2, 5, 0):
2684 		adev->nbio.funcs = &nbio_v7_0_funcs;
2685 		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
2686 		break;
2687 	case IP_VERSION(7, 4, 0):
2688 	case IP_VERSION(7, 4, 1):
2689 	case IP_VERSION(7, 4, 4):
2690 		adev->nbio.funcs = &nbio_v7_4_funcs;
2691 		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
2692 		break;
2693 	case IP_VERSION(7, 9, 0):
2694 		adev->nbio.funcs = &nbio_v7_9_funcs;
2695 		adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
2696 		break;
2697 	case IP_VERSION(7, 11, 0):
2698 	case IP_VERSION(7, 11, 1):
2699 		adev->nbio.funcs = &nbio_v7_11_funcs;
2700 		adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
2701 		break;
2702 	case IP_VERSION(7, 2, 0):
2703 	case IP_VERSION(7, 2, 1):
2704 	case IP_VERSION(7, 3, 0):
2705 	case IP_VERSION(7, 5, 0):
2706 	case IP_VERSION(7, 5, 1):
2707 		adev->nbio.funcs = &nbio_v7_2_funcs;
2708 		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
2709 		break;
2710 	case IP_VERSION(2, 1, 1):
2711 	case IP_VERSION(2, 3, 0):
2712 	case IP_VERSION(2, 3, 1):
2713 	case IP_VERSION(2, 3, 2):
2714 	case IP_VERSION(3, 3, 0):
2715 	case IP_VERSION(3, 3, 1):
2716 	case IP_VERSION(3, 3, 2):
2717 	case IP_VERSION(3, 3, 3):
2718 		adev->nbio.funcs = &nbio_v2_3_funcs;
2719 		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
2720 		break;
2721 	case IP_VERSION(4, 3, 0):
2722 	case IP_VERSION(4, 3, 1):
2723 		if (amdgpu_sriov_vf(adev))
2724 			adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
2725 		else
2726 			adev->nbio.funcs = &nbio_v4_3_funcs;
2727 		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
2728 		break;
2729 	case IP_VERSION(7, 7, 0):
2730 	case IP_VERSION(7, 7, 1):
2731 		adev->nbio.funcs = &nbio_v7_7_funcs;
2732 		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
2733 		break;
2734 	case IP_VERSION(6, 3, 1):
2735 		adev->nbio.funcs = &nbif_v6_3_1_funcs;
2736 		adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg;
2737 		break;
2738 	default:
2739 		break;
2740 	}
2741 
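	/* set HDP version */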
2742 	switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
2743 	case IP_VERSION(4, 0, 0):
2744 	case IP_VERSION(4, 0, 1):
2745 	case IP_VERSION(4, 1, 0):
2746 	case IP_VERSION(4, 1, 1):
2747 	case IP_VERSION(4, 1, 2):
2748 	case IP_VERSION(4, 2, 0):
2749 	case IP_VERSION(4, 2, 1):
2750 	case IP_VERSION(4, 4, 0):
2751 	case IP_VERSION(4, 4, 2):
2752 	case IP_VERSION(4, 4, 5):
2753 		adev->hdp.funcs = &hdp_v4_0_funcs;
2754 		break;
2755 	case IP_VERSION(5, 0, 0):
2756 	case IP_VERSION(5, 0, 1):
2757 	case IP_VERSION(5, 0, 2):
2758 	case IP_VERSION(5, 0, 3):
2759 	case IP_VERSION(5, 0, 4):
2760 	case IP_VERSION(5, 2, 0):
2761 		adev->hdp.funcs = &hdp_v5_0_funcs;
2762 		break;
2763 	case IP_VERSION(5, 2, 1):
2764 		adev->hdp.funcs = &hdp_v5_2_funcs;
2765 		break;
2766 	case IP_VERSION(6, 0, 0):
2767 	case IP_VERSION(6, 0, 1):
2768 	case IP_VERSION(6, 1, 0):
2769 		adev->hdp.funcs = &hdp_v6_0_funcs;
2770 		break;
2771 	case IP_VERSION(7, 0, 0):
2772 		adev->hdp.funcs = &hdp_v7_0_funcs;
2773 		break;
2774 	default:
2775 		break;
2776 	}
2777 
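	/* set DF version */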
2778 	switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {
2779 	case IP_VERSION(3, 6, 0):
2780 	case IP_VERSION(3, 6, 1):
2781 	case IP_VERSION(3, 6, 2):
2782 		adev->df.funcs = &df_v3_6_funcs;
2783 		break;
2784 	case IP_VERSION(2, 1, 0):
2785 	case IP_VERSION(2, 1, 1):
2786 	case IP_VERSION(2, 5, 0):
2787 	case IP_VERSION(3, 5, 1):
2788 	case IP_VERSION(3, 5, 2):
2789 		adev->df.funcs = &df_v1_7_funcs;
2790 		break;
2791 	case IP_VERSION(4, 3, 0):
2792 		adev->df.funcs = &df_v4_3_funcs;
2793 		break;
2794 	case IP_VERSION(4, 6, 2):
2795 		adev->df.funcs = &df_v4_6_2_funcs;
2796 		break;
2797 	default:
2798 		break;
2799 	}
2800 
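	/* set SMUIO version */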
2801 	switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {
2802 	case IP_VERSION(9, 0, 0):
2803 	case IP_VERSION(9, 0, 1):
2804 	case IP_VERSION(10, 0, 0):
2805 	case IP_VERSION(10, 0, 1):
2806 	case IP_VERSION(10, 0, 2):
2807 		adev->smuio.funcs = &smuio_v9_0_funcs;
2808 		break;
2809 	case IP_VERSION(11, 0, 0):
2810 	case IP_VERSION(11, 0, 2):
2811 	case IP_VERSION(11, 0, 3):
2812 	case IP_VERSION(11, 0, 4):
2813 	case IP_VERSION(11, 0, 7):
2814 	case IP_VERSION(11, 0, 8):
2815 		adev->smuio.funcs = &smuio_v11_0_funcs;
2816 		break;
2817 	case IP_VERSION(11, 0, 6):
2818 	case IP_VERSION(11, 0, 10):
2819 	case IP_VERSION(11, 0, 11):
2820 	case IP_VERSION(11, 5, 0):
2821 	case IP_VERSION(13, 0, 1):
2822 	case IP_VERSION(13, 0, 9):
2823 	case IP_VERSION(13, 0, 10):
2824 		adev->smuio.funcs = &smuio_v11_0_6_funcs;
2825 		break;
2826 	case IP_VERSION(13, 0, 2):
2827 		adev->smuio.funcs = &smuio_v13_0_funcs;
2828 		break;
2829 	case IP_VERSION(13, 0, 3):
2830 		adev->smuio.funcs = &smuio_v13_0_3_funcs;
		if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU)
			adev->flags |= AMD_IS_APU;
2834 		break;
2835 	case IP_VERSION(13, 0, 6):
2836 	case IP_VERSION(13, 0, 8):
2837 	case IP_VERSION(14, 0, 0):
2838 	case IP_VERSION(14, 0, 1):
2839 		adev->smuio.funcs = &smuio_v13_0_6_funcs;
2840 		break;
2841 	case IP_VERSION(14, 0, 2):
2842 		adev->smuio.funcs = &smuio_v14_0_2_funcs;
2843 		break;
2844 	default:
2845 		break;
2846 	}
2847 
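	/* set LSDMA version */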
2848 	switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
2849 	case IP_VERSION(6, 0, 0):
2850 	case IP_VERSION(6, 0, 1):
2851 	case IP_VERSION(6, 0, 2):
2852 	case IP_VERSION(6, 0, 3):
2853 		adev->lsdma.funcs = &lsdma_v6_0_funcs;
2854 		break;
2855 	case IP_VERSION(7, 0, 0):
2856 	case IP_VERSION(7, 0, 1):
2857 		adev->lsdma.funcs = &lsdma_v7_0_funcs;
2858 		break;
2859 	default:
2860 		break;
2861 	}
2862 
2863 	r = amdgpu_discovery_set_common_ip_blocks(adev);
2864 	if (r)
2865 		return r;
2866 
2867 	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
2868 	if (r)
2869 		return r;
2870 
2871 	/* For SR-IOV, PSP needs to be initialized before IH */
2872 	if (amdgpu_sriov_vf(adev)) {
2873 		r = amdgpu_discovery_set_psp_ip_blocks(adev);
2874 		if (r)
2875 			return r;
2876 		r = amdgpu_discovery_set_ih_ip_blocks(adev);
2877 		if (r)
2878 			return r;
2879 	} else {
2880 		r = amdgpu_discovery_set_ih_ip_blocks(adev);
2881 		if (r)
2882 			return r;
2883 
2884 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2885 			r = amdgpu_discovery_set_psp_ip_blocks(adev);
2886 			if (r)
2887 				return r;
2888 		}
2889 	}
2890 
2891 	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2892 		r = amdgpu_discovery_set_smu_ip_blocks(adev);
2893 		if (r)
2894 			return r;
2895 	}
2896 
2897 	r = amdgpu_discovery_set_display_ip_blocks(adev);
2898 	if (r)
2899 		return r;
2900 
2901 	r = amdgpu_discovery_set_gc_ip_blocks(adev);
2902 	if (r)
2903 		return r;
2904 
2905 	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
2906 	if (r)
2907 		return r;
2908 
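	/* for direct or RLC backdoor-autoload firmware, the SMU block is
	 * added after GFX/SDMA rather than right after PSP
	 */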
2909 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
2910 	     !amdgpu_sriov_vf(adev)) ||
2911 	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
2912 		r = amdgpu_discovery_set_smu_ip_blocks(adev);
2913 		if (r)
2914 			return r;
2915 	}
2916 
2917 	r = amdgpu_discovery_set_mm_ip_blocks(adev);
2918 	if (r)
2919 		return r;
2920 
2921 	r = amdgpu_discovery_set_mes_ip_blocks(adev);
2922 	if (r)
2923 		return r;
2924 
2925 	r = amdgpu_discovery_set_vpe_ip_blocks(adev);
2926 	if (r)
2927 		return r;
2928 
2929 	r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);
2930 	if (r)
2931 		return r;
2932 
2933 	r = amdgpu_discovery_set_isp_ip_blocks(adev);
2934 	if (r)
2935 		return r;
2936 	return 0;
2937 }
2938 
2939