1 /* 2 * Copyright 2014 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#include "amdgpu_powerplay.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif

MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");

/*
 * Indirect registers accessor
 *
 * Each accessor below programs an index register and then reads or writes
 * the paired data register, serialized by the matching per-aperture
 * spinlock in struct amdgpu_device.
 */

/* Read a PCIE indirect register under pcie_idx_lock. */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);	/* read back the index before fetching data */
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

/* Write a PCIE indirect register; index and data are each read back
 * after being written. */
static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/* Read an SMC indirect register (dGPU aperture, SMC_IND_INDEX_0/DATA_0). */
static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, (reg));
	r = RREG32(mmSMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

/* Write an SMC indirect register (dGPU aperture). */
static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, (reg));
	WREG32(mmSMC_IND_DATA_0, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

/* Read an SMC indirect register on APUs (MP0PUB aperture); selected in
 * vi_common_early_init() when AMD_IS_APU is set. */
static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

/* Write an SMC indirect register on APUs (MP0PUB aperture). */
static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* Read a UVD context register; the index is masked to 9 bits. */
static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

/* Write a UVD context register; the index is masked to 9 bits. */
static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

/* Read a DIDT indirect register under didt_idx_lock. */
static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

/* Write a DIDT indirect register under didt_idx_lock. */
static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

/* Golden register tables: three words per entry (register, mask, value),
 * consumed by amdgpu_program_register_sequence() from
 * vi_init_golden_registers(). */

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

/* Program the per-ASIC "golden" register sequence; Polaris has none. */
static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	/* APUs always use the SPLL reference frequency directly */
	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active registers instances. Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

/* Set VGA state; not implemented for VI yet. */
static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

/* Read the vbios with VGA disabled and the ROM temporarily enabled,
 * then restore every register that was touched. Returns true when
 * amdgpu_read_bios() succeeded. */
static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	/* save the registers we are about to clobber */
	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

/* Copy @length_bytes (rounded up to a dword) of the vbios ROM into @bios
 * via the SMC ROM_INDEX/ROM_DATA indirect pair. Returns false for a NULL
 * buffer, zero length, or on APUs (whose vbios lives in the sbios image). */
static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_0, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_0, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

/* Report SR-IOV capabilities from BIF_IOV_FUNC_IDENTIFIER as
 * AMDGPU_VIRT_CAPS_* flags. */
static u32 vi_get_virtual_caps(struct amdgpu_device *adev)
{
	u32 caps = 0;
	u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);

	if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
		caps |= AMDGPU_VIRT_CAPS_SRIOV_EN;

	if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
		caps |= AMDGPU_VIRT_CAPS_IS_VF;

	return caps;
}

/* Per-ASIC override tables consulted by vi_read_register() before the
 * generic vi_allowed_read_registers table below. Entry fields appear to
 * be { reg_offset, untouched[, grbm_indexed] } — matches the field uses
 * in vi_read_register(). */
static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
	{mmGB_MACROTILE_MODE7, true},
};

static const struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
	{mmGB_TILE_MODE7, true},
	{mmGB_TILE_MODE12, true},
	{mmGB_TILE_MODE17, true},
	{mmGB_TILE_MODE23, true},
	{mmGB_MACROTILE_MODE7, true},
};

static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS, false},
	{mmGRBM_STATUS2, false},
	{mmGRBM_STATUS_SE0, false},
	{mmGRBM_STATUS_SE1, false},
	{mmGRBM_STATUS_SE2, false},
	{mmGRBM_STATUS_SE3, false},
	{mmSRBM_STATUS, false},
	{mmSRBM_STATUS2, false},
	{mmSRBM_STATUS3, false},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
	{mmCP_STAT, false},
	{mmCP_STALLED_STAT1, false},
	{mmCP_STALLED_STAT2, false},
	{mmCP_STALLED_STAT3, false},
	{mmCP_CPF_BUSY_STAT, false},
	{mmCP_CPF_STALLED_STAT1, false},
	{mmCP_CPF_STATUS, false},
	{mmCP_CPC_BUSY_STAT, false},
	{mmCP_CPC_STALLED_STAT1, false},
	{mmCP_CPC_STATUS, false},
	{mmGB_ADDR_CONFIG, false},
	{mmMC_ARB_RAMCFG, false},
	{mmGB_TILE_MODE0, false},
	{mmGB_TILE_MODE1, false},
	{mmGB_TILE_MODE2, false},
	{mmGB_TILE_MODE3, false},
	{mmGB_TILE_MODE4, false},
	{mmGB_TILE_MODE5, false},
	{mmGB_TILE_MODE6, false},
	{mmGB_TILE_MODE7, false},
	{mmGB_TILE_MODE8, false},
	{mmGB_TILE_MODE9, false},
	{mmGB_TILE_MODE10, false},
	{mmGB_TILE_MODE11, false},
	{mmGB_TILE_MODE12, false},
	{mmGB_TILE_MODE13, false},
	{mmGB_TILE_MODE14, false},
	{mmGB_TILE_MODE15, false},
	{mmGB_TILE_MODE16, false},
	{mmGB_TILE_MODE17, false},
	{mmGB_TILE_MODE18, false},
	{mmGB_TILE_MODE19, false},
	{mmGB_TILE_MODE20, false},
	{mmGB_TILE_MODE21, false},
	{mmGB_TILE_MODE22, false},
	{mmGB_TILE_MODE23, false},
	{mmGB_TILE_MODE24, false},
	{mmGB_TILE_MODE25, false},
	{mmGB_TILE_MODE26, false},
	{mmGB_TILE_MODE27, false},
	{mmGB_TILE_MODE28, false},
	{mmGB_TILE_MODE29, false},
	{mmGB_TILE_MODE30, false},
	{mmGB_TILE_MODE31, false},
	{mmGB_MACROTILE_MODE0, false},
	{mmGB_MACROTILE_MODE1, false},
	{mmGB_MACROTILE_MODE2, false},
	{mmGB_MACROTILE_MODE3, false},
	{mmGB_MACROTILE_MODE4, false},
	{mmGB_MACROTILE_MODE5, false},
	{mmGB_MACROTILE_MODE6, false},
	{mmGB_MACROTILE_MODE7, false},
	{mmGB_MACROTILE_MODE8, false},
	{mmGB_MACROTILE_MODE9, false},
	{mmGB_MACROTILE_MODE10, false},
	{mmGB_MACROTILE_MODE11, false},
	{mmGB_MACROTILE_MODE12, false},
	{mmGB_MACROTILE_MODE13, false},
	{mmGB_MACROTILE_MODE14, false},
	{mmGB_MACROTILE_MODE15, false},
	{mmCC_RB_BACKEND_DISABLE, false, true},
	{mmGC_USER_RB_BACKEND_DISABLE, false, true},
	{mmGB_BACKEND_MAP, false, false},
	{mmPA_SC_RASTER_CONFIG, false, true},
	{mmPA_SC_RASTER_CONFIG_1, false, true},
};

/* Read @reg_offset with GRBM_GFX_INDEX pointed at @se_num/@sh_num
 * (0xffffffff selects broadcast), restoring broadcast afterwards. */
static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

/* Read @reg_offset into @value if it is in the allowed list for this ASIC.
 * The per-ASIC table is checked first and overrides the generic table.
 * "untouched" entries succeed but leave *value at 0. Returns 0 on a hit,
 * -EINVAL for unknown ASICs or registers not in any list. */
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	const struct amdgpu_allowed_register_entry *asic_register_table = NULL;
	const struct amdgpu_allowed_register_entry *asic_register_entry;
	uint32_t size, i;

	*value = 0;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		asic_register_table = tonga_allowed_read_registers;
		size = ARRAY_SIZE(tonga_allowed_read_registers);
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		asic_register_table = cz_allowed_read_registers;
		size = ARRAY_SIZE(cz_allowed_read_registers);
		break;
	default:
		return -EINVAL;
	}

	if (asic_register_table) {
		for (i = 0; i < size; i++) {
			asic_register_entry = asic_register_table + i;
			if (reg_offset != asic_register_entry->reg_offset)
				continue;
			if (!asic_register_entry->untouched)
				*value = asic_register_entry->grbm_indexed ?
					vi_read_indexed_register(adev, se_num,
								 sh_num, reg_offset) :
					RREG32(reg_offset);
			return 0;
		}
	}

	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		if (!vi_allowed_read_registers[i].untouched)
			*value = vi_allowed_read_registers[i].grbm_indexed ?
				vi_read_indexed_register(adev, se_num,
							 sh_num, reg_offset) :
				RREG32(reg_offset);
		return 0;
	}
	return -EINVAL;
}

/* Reset the GPU through PCI config space; bus mastering is disabled for
 * the duration. Polls mmCONFIG_MEMSIZE (reads as 0xffffffff while the
 * ASIC is in reset) for up to adev->usec_timeout microseconds.
 * Returns 0 once the ASIC is back, -EINVAL on timeout. */
static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}

/* Toggle the ATOM_S3_ASIC_GUI_ENGINE_HUNG flag in BIOS scratch reg 3. */
static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
{
	u32 tmp = RREG32(mmBIOS_SCRATCH_3);

	if (hung)
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
	else
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

	WREG32(mmBIOS_SCRATCH_3, tmp);
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
645 */ 646 static int vi_asic_reset(struct amdgpu_device *adev) 647 { 648 int r; 649 650 vi_set_bios_scratch_engine_hung(adev, true); 651 652 r = vi_gpu_pci_config_reset(adev); 653 654 vi_set_bios_scratch_engine_hung(adev, false); 655 656 return r; 657 } 658 659 static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock, 660 u32 cntl_reg, u32 status_reg) 661 { 662 int r, i; 663 struct atom_clock_dividers dividers; 664 uint32_t tmp; 665 666 r = amdgpu_atombios_get_clock_dividers(adev, 667 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, 668 clock, false, ÷rs); 669 if (r) 670 return r; 671 672 tmp = RREG32_SMC(cntl_reg); 673 tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK | 674 CG_DCLK_CNTL__DCLK_DIVIDER_MASK); 675 tmp |= dividers.post_divider; 676 WREG32_SMC(cntl_reg, tmp); 677 678 for (i = 0; i < 100; i++) { 679 if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK) 680 break; 681 mdelay(10); 682 } 683 if (i == 100) 684 return -ETIMEDOUT; 685 686 return 0; 687 } 688 689 static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk) 690 { 691 int r; 692 693 r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS); 694 if (r) 695 return r; 696 697 r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS); 698 699 return 0; 700 } 701 702 static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk) 703 { 704 /* todo */ 705 706 return 0; 707 } 708 709 static void vi_pcie_gen3_enable(struct amdgpu_device *adev) 710 { 711 if (pci_is_root_bus(adev->pdev->bus)) 712 return; 713 714 if (amdgpu_pcie_gen2 == 0) 715 return; 716 717 if (adev->flags & AMD_IS_APU) 718 return; 719 720 if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | 721 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3))) 722 return; 723 724 /* todo */ 725 } 726 727 static void vi_program_aspm(struct amdgpu_device *adev) 728 { 729 730 if (amdgpu_aspm == 0) 731 return; 732 733 /* todo */ 734 } 735 736 static void vi_enable_doorbell_aperture(struct 
amdgpu_device *adev, 737 bool enable) 738 { 739 u32 tmp; 740 741 /* not necessary on CZ */ 742 if (adev->flags & AMD_IS_APU) 743 return; 744 745 tmp = RREG32(mmBIF_DOORBELL_APER_EN); 746 if (enable) 747 tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1); 748 else 749 tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0); 750 751 WREG32(mmBIF_DOORBELL_APER_EN, tmp); 752 } 753 754 /* topaz has no DCE, UVD, VCE */ 755 static const struct amdgpu_ip_block_version topaz_ip_blocks[] = 756 { 757 /* ORDER MATTERS! */ 758 { 759 .type = AMD_IP_BLOCK_TYPE_COMMON, 760 .major = 2, 761 .minor = 0, 762 .rev = 0, 763 .funcs = &vi_common_ip_funcs, 764 }, 765 { 766 .type = AMD_IP_BLOCK_TYPE_GMC, 767 .major = 7, 768 .minor = 4, 769 .rev = 0, 770 .funcs = &gmc_v7_0_ip_funcs, 771 }, 772 { 773 .type = AMD_IP_BLOCK_TYPE_IH, 774 .major = 2, 775 .minor = 4, 776 .rev = 0, 777 .funcs = &iceland_ih_ip_funcs, 778 }, 779 { 780 .type = AMD_IP_BLOCK_TYPE_SMC, 781 .major = 7, 782 .minor = 1, 783 .rev = 0, 784 .funcs = &amdgpu_pp_ip_funcs, 785 }, 786 { 787 .type = AMD_IP_BLOCK_TYPE_GFX, 788 .major = 8, 789 .minor = 0, 790 .rev = 0, 791 .funcs = &gfx_v8_0_ip_funcs, 792 }, 793 { 794 .type = AMD_IP_BLOCK_TYPE_SDMA, 795 .major = 2, 796 .minor = 4, 797 .rev = 0, 798 .funcs = &sdma_v2_4_ip_funcs, 799 }, 800 }; 801 802 static const struct amdgpu_ip_block_version tonga_ip_blocks[] = 803 { 804 /* ORDER MATTERS! 
 */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 10,
		.minor = 0,
		.rev = 0,
		.funcs = &dce_v10_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 5,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v5_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

/* Fiji: GMC 8.5, DCE 10.1, UVD 6.0 — otherwise same layout as Tonga. */
static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 5,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 10,
		.minor = 1,
		.rev = 0,
		.funcs = &dce_v10_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

/* Polaris 10/11: GMC 8.1, IH 3.1, SMC 7.2, DCE 11.2, SDMA 3.1,
 * UVD 6.3, VCE 3.4 — shared by both chips in vi_set_ip_blocks(). */
static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 1,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 1,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 2,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 11,
		.minor = 2,
		.rev = 0,
		.funcs = &dce_v11_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 1,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 3,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 4,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

/* Carrizo/Stoney (APU): cz_ih interrupt handler, SMC 8.0, optional ACP. */
static const struct amdgpu_ip_block_version cz_ip_blocks[] =
{
	/* ORDER MATTERS!
*/ 1009 { 1010 .type = AMD_IP_BLOCK_TYPE_COMMON, 1011 .major = 2, 1012 .minor = 0, 1013 .rev = 0, 1014 .funcs = &vi_common_ip_funcs, 1015 }, 1016 { 1017 .type = AMD_IP_BLOCK_TYPE_GMC, 1018 .major = 8, 1019 .minor = 0, 1020 .rev = 0, 1021 .funcs = &gmc_v8_0_ip_funcs, 1022 }, 1023 { 1024 .type = AMD_IP_BLOCK_TYPE_IH, 1025 .major = 3, 1026 .minor = 0, 1027 .rev = 0, 1028 .funcs = &cz_ih_ip_funcs, 1029 }, 1030 { 1031 .type = AMD_IP_BLOCK_TYPE_SMC, 1032 .major = 8, 1033 .minor = 0, 1034 .rev = 0, 1035 .funcs = &amdgpu_pp_ip_funcs 1036 }, 1037 { 1038 .type = AMD_IP_BLOCK_TYPE_DCE, 1039 .major = 11, 1040 .minor = 0, 1041 .rev = 0, 1042 .funcs = &dce_v11_0_ip_funcs, 1043 }, 1044 { 1045 .type = AMD_IP_BLOCK_TYPE_GFX, 1046 .major = 8, 1047 .minor = 0, 1048 .rev = 0, 1049 .funcs = &gfx_v8_0_ip_funcs, 1050 }, 1051 { 1052 .type = AMD_IP_BLOCK_TYPE_SDMA, 1053 .major = 3, 1054 .minor = 0, 1055 .rev = 0, 1056 .funcs = &sdma_v3_0_ip_funcs, 1057 }, 1058 { 1059 .type = AMD_IP_BLOCK_TYPE_UVD, 1060 .major = 6, 1061 .minor = 0, 1062 .rev = 0, 1063 .funcs = &uvd_v6_0_ip_funcs, 1064 }, 1065 { 1066 .type = AMD_IP_BLOCK_TYPE_VCE, 1067 .major = 3, 1068 .minor = 0, 1069 .rev = 0, 1070 .funcs = &vce_v3_0_ip_funcs, 1071 }, 1072 #if defined(CONFIG_DRM_AMD_ACP) 1073 { 1074 .type = AMD_IP_BLOCK_TYPE_ACP, 1075 .major = 2, 1076 .minor = 2, 1077 .rev = 0, 1078 .funcs = &acp_ip_funcs, 1079 }, 1080 #endif 1081 }; 1082 1083 int vi_set_ip_blocks(struct amdgpu_device *adev) 1084 { 1085 switch (adev->asic_type) { 1086 case CHIP_TOPAZ: 1087 adev->ip_blocks = topaz_ip_blocks; 1088 adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks); 1089 break; 1090 case CHIP_FIJI: 1091 adev->ip_blocks = fiji_ip_blocks; 1092 adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks); 1093 break; 1094 case CHIP_TONGA: 1095 adev->ip_blocks = tonga_ip_blocks; 1096 adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks); 1097 break; 1098 case CHIP_POLARIS11: 1099 case CHIP_POLARIS10: 1100 adev->ip_blocks = polaris11_ip_blocks; 1101 
adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks); 1102 break; 1103 case CHIP_CARRIZO: 1104 case CHIP_STONEY: 1105 adev->ip_blocks = cz_ip_blocks; 1106 adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks); 1107 break; 1108 default: 1109 /* FIXME: not supported yet */ 1110 return -EINVAL; 1111 } 1112 1113 return 0; 1114 } 1115 1116 #define ATI_REV_ID_FUSE_MACRO__ADDRESS 0xC0014044 1117 #define ATI_REV_ID_FUSE_MACRO__SHIFT 9 1118 #define ATI_REV_ID_FUSE_MACRO__MASK 0x00001E00 1119 1120 static uint32_t vi_get_rev_id(struct amdgpu_device *adev) 1121 { 1122 if (adev->flags & AMD_IS_APU) 1123 return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK) 1124 >> ATI_REV_ID_FUSE_MACRO__SHIFT; 1125 else 1126 return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK) 1127 >> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT; 1128 } 1129 1130 static const struct amdgpu_asic_funcs vi_asic_funcs = 1131 { 1132 .read_disabled_bios = &vi_read_disabled_bios, 1133 .read_bios_from_rom = &vi_read_bios_from_rom, 1134 .read_register = &vi_read_register, 1135 .reset = &vi_asic_reset, 1136 .set_vga_state = &vi_vga_set_state, 1137 .get_xclk = &vi_get_xclk, 1138 .set_uvd_clocks = &vi_set_uvd_clocks, 1139 .set_vce_clocks = &vi_set_vce_clocks, 1140 .get_virtual_caps = &vi_get_virtual_caps, 1141 }; 1142 1143 static int vi_common_early_init(void *handle) 1144 { 1145 bool smc_enabled = false; 1146 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1147 1148 if (adev->flags & AMD_IS_APU) { 1149 adev->smc_rreg = &cz_smc_rreg; 1150 adev->smc_wreg = &cz_smc_wreg; 1151 } else { 1152 adev->smc_rreg = &vi_smc_rreg; 1153 adev->smc_wreg = &vi_smc_wreg; 1154 } 1155 adev->pcie_rreg = &vi_pcie_rreg; 1156 adev->pcie_wreg = &vi_pcie_wreg; 1157 adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg; 1158 adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg; 1159 adev->didt_rreg = &vi_didt_rreg; 1160 adev->didt_wreg = &vi_didt_wreg; 1161 1162 adev->asic_funcs = &vi_asic_funcs; 1163 1164 if 
(amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) && 1165 (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC))) 1166 smc_enabled = true; 1167 1168 adev->rev_id = vi_get_rev_id(adev); 1169 adev->external_rev_id = 0xFF; 1170 switch (adev->asic_type) { 1171 case CHIP_TOPAZ: 1172 adev->cg_flags = 0; 1173 adev->pg_flags = 0; 1174 adev->external_rev_id = 0x1; 1175 break; 1176 case CHIP_FIJI: 1177 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | 1178 AMD_CG_SUPPORT_GFX_MGLS | 1179 AMD_CG_SUPPORT_GFX_RLC_LS | 1180 AMD_CG_SUPPORT_GFX_CP_LS | 1181 AMD_CG_SUPPORT_GFX_CGTS | 1182 AMD_CG_SUPPORT_GFX_CGTS_LS | 1183 AMD_CG_SUPPORT_GFX_CGCG | 1184 AMD_CG_SUPPORT_GFX_CGLS | 1185 AMD_CG_SUPPORT_SDMA_MGCG | 1186 AMD_CG_SUPPORT_SDMA_LS | 1187 AMD_CG_SUPPORT_BIF_LS | 1188 AMD_CG_SUPPORT_HDP_MGCG | 1189 AMD_CG_SUPPORT_HDP_LS | 1190 AMD_CG_SUPPORT_ROM_MGCG | 1191 AMD_CG_SUPPORT_MC_MGCG | 1192 AMD_CG_SUPPORT_MC_LS; 1193 adev->pg_flags = 0; 1194 adev->external_rev_id = adev->rev_id + 0x3c; 1195 break; 1196 case CHIP_TONGA: 1197 adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG; 1198 adev->pg_flags = 0; 1199 adev->external_rev_id = adev->rev_id + 0x14; 1200 break; 1201 case CHIP_POLARIS11: 1202 adev->cg_flags = 0; 1203 adev->pg_flags = 0; 1204 adev->external_rev_id = adev->rev_id + 0x5A; 1205 break; 1206 case CHIP_POLARIS10: 1207 adev->cg_flags = 0; 1208 adev->pg_flags = 0; 1209 adev->external_rev_id = adev->rev_id + 0x50; 1210 break; 1211 case CHIP_CARRIZO: 1212 adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG | 1213 AMD_CG_SUPPORT_GFX_MGCG | 1214 AMD_CG_SUPPORT_GFX_MGLS | 1215 AMD_CG_SUPPORT_GFX_RLC_LS | 1216 AMD_CG_SUPPORT_GFX_CP_LS | 1217 AMD_CG_SUPPORT_GFX_CGTS | 1218 AMD_CG_SUPPORT_GFX_MGLS | 1219 AMD_CG_SUPPORT_GFX_CGTS_LS | 1220 AMD_CG_SUPPORT_GFX_CGCG | 1221 AMD_CG_SUPPORT_GFX_CGLS | 1222 AMD_CG_SUPPORT_BIF_LS | 1223 AMD_CG_SUPPORT_HDP_MGCG | 1224 AMD_CG_SUPPORT_HDP_LS | 1225 AMD_CG_SUPPORT_SDMA_MGCG | 1226 AMD_CG_SUPPORT_SDMA_LS; 1227 /* rev0 hardware doesn't support PG */ 1228 adev->pg_flags = 0; 1229 
if (adev->rev_id != 0x00) 1230 adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | 1231 AMD_PG_SUPPORT_GFX_SMG | 1232 AMD_PG_SUPPORT_GFX_DMG | 1233 AMD_PG_SUPPORT_CP | 1234 AMD_PG_SUPPORT_RLC_SMU_HS | 1235 AMD_PG_SUPPORT_GFX_PIPELINE; 1236 adev->external_rev_id = adev->rev_id + 0x1; 1237 break; 1238 case CHIP_STONEY: 1239 adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG | 1240 AMD_CG_SUPPORT_GFX_MGCG | 1241 AMD_CG_SUPPORT_GFX_MGLS | 1242 AMD_CG_SUPPORT_GFX_RLC_LS | 1243 AMD_CG_SUPPORT_GFX_CP_LS | 1244 AMD_CG_SUPPORT_GFX_CGTS | 1245 AMD_CG_SUPPORT_GFX_MGLS | 1246 AMD_CG_SUPPORT_GFX_CGTS_LS | 1247 AMD_CG_SUPPORT_GFX_CGCG | 1248 AMD_CG_SUPPORT_GFX_CGLS | 1249 AMD_CG_SUPPORT_BIF_LS | 1250 AMD_CG_SUPPORT_HDP_MGCG | 1251 AMD_CG_SUPPORT_HDP_LS | 1252 AMD_CG_SUPPORT_SDMA_MGCG | 1253 AMD_CG_SUPPORT_SDMA_LS; 1254 adev->pg_flags = AMD_PG_SUPPORT_GFX_PG | 1255 AMD_PG_SUPPORT_GFX_SMG | 1256 AMD_PG_SUPPORT_GFX_DMG | 1257 AMD_PG_SUPPORT_GFX_PIPELINE | 1258 AMD_PG_SUPPORT_CP | 1259 AMD_PG_SUPPORT_RLC_SMU_HS; 1260 adev->external_rev_id = adev->rev_id + 0x1; 1261 break; 1262 default: 1263 /* FIXME: not supported yet */ 1264 return -EINVAL; 1265 } 1266 1267 if (amdgpu_smc_load_fw && smc_enabled) 1268 adev->firmware.smu_load = true; 1269 1270 amdgpu_get_pcie_info(adev); 1271 1272 return 0; 1273 } 1274 1275 static int vi_common_sw_init(void *handle) 1276 { 1277 return 0; 1278 } 1279 1280 static int vi_common_sw_fini(void *handle) 1281 { 1282 return 0; 1283 } 1284 1285 static int vi_common_hw_init(void *handle) 1286 { 1287 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1288 1289 /* move the golden regs per IP block */ 1290 vi_init_golden_registers(adev); 1291 /* enable pcie gen2/3 link */ 1292 vi_pcie_gen3_enable(adev); 1293 /* enable aspm */ 1294 vi_program_aspm(adev); 1295 /* enable the doorbell aperture */ 1296 vi_enable_doorbell_aperture(adev, true); 1297 1298 return 0; 1299 } 1300 1301 static int vi_common_hw_fini(void *handle) 1302 { 1303 struct amdgpu_device *adev = (struct 
amdgpu_device *)handle; 1304 1305 /* enable the doorbell aperture */ 1306 vi_enable_doorbell_aperture(adev, false); 1307 1308 return 0; 1309 } 1310 1311 static int vi_common_suspend(void *handle) 1312 { 1313 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1314 1315 return vi_common_hw_fini(adev); 1316 } 1317 1318 static int vi_common_resume(void *handle) 1319 { 1320 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1321 1322 return vi_common_hw_init(adev); 1323 } 1324 1325 static bool vi_common_is_idle(void *handle) 1326 { 1327 return true; 1328 } 1329 1330 static int vi_common_wait_for_idle(void *handle) 1331 { 1332 return 0; 1333 } 1334 1335 static int vi_common_soft_reset(void *handle) 1336 { 1337 return 0; 1338 } 1339 1340 static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev, 1341 bool enable) 1342 { 1343 uint32_t temp, data; 1344 1345 temp = data = RREG32_PCIE(ixPCIE_CNTL2); 1346 1347 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) 1348 data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK | 1349 PCIE_CNTL2__MST_MEM_LS_EN_MASK | 1350 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK; 1351 else 1352 data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK | 1353 PCIE_CNTL2__MST_MEM_LS_EN_MASK | 1354 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK); 1355 1356 if (temp != data) 1357 WREG32_PCIE(ixPCIE_CNTL2, data); 1358 } 1359 1360 static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev, 1361 bool enable) 1362 { 1363 uint32_t temp, data; 1364 1365 temp = data = RREG32(mmHDP_HOST_PATH_CNTL); 1366 1367 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG)) 1368 data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK; 1369 else 1370 data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK; 1371 1372 if (temp != data) 1373 WREG32(mmHDP_HOST_PATH_CNTL, data); 1374 } 1375 1376 static void vi_update_hdp_light_sleep(struct amdgpu_device *adev, 1377 bool enable) 1378 { 1379 uint32_t temp, data; 1380 1381 temp = data = RREG32(mmHDP_MEM_POWER_LS); 1382 1383 if 
(enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) 1384 data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK; 1385 else 1386 data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK; 1387 1388 if (temp != data) 1389 WREG32(mmHDP_MEM_POWER_LS, data); 1390 } 1391 1392 static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev, 1393 bool enable) 1394 { 1395 uint32_t temp, data; 1396 1397 temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0); 1398 1399 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG)) 1400 data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK | 1401 CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK); 1402 else 1403 data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK | 1404 CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK; 1405 1406 if (temp != data) 1407 WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data); 1408 } 1409 1410 static int vi_common_set_clockgating_state(void *handle, 1411 enum amd_clockgating_state state) 1412 { 1413 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1414 1415 switch (adev->asic_type) { 1416 case CHIP_FIJI: 1417 vi_update_bif_medium_grain_light_sleep(adev, 1418 state == AMD_CG_STATE_GATE ? true : false); 1419 vi_update_hdp_medium_grain_clock_gating(adev, 1420 state == AMD_CG_STATE_GATE ? true : false); 1421 vi_update_hdp_light_sleep(adev, 1422 state == AMD_CG_STATE_GATE ? true : false); 1423 vi_update_rom_medium_grain_clock_gating(adev, 1424 state == AMD_CG_STATE_GATE ? true : false); 1425 break; 1426 case CHIP_CARRIZO: 1427 case CHIP_STONEY: 1428 vi_update_bif_medium_grain_light_sleep(adev, 1429 state == AMD_CG_STATE_GATE ? true : false); 1430 vi_update_hdp_medium_grain_clock_gating(adev, 1431 state == AMD_CG_STATE_GATE ? true : false); 1432 vi_update_hdp_light_sleep(adev, 1433 state == AMD_CG_STATE_GATE ? 
true : false); 1434 break; 1435 default: 1436 break; 1437 } 1438 return 0; 1439 } 1440 1441 static int vi_common_set_powergating_state(void *handle, 1442 enum amd_powergating_state state) 1443 { 1444 return 0; 1445 } 1446 1447 const struct amd_ip_funcs vi_common_ip_funcs = { 1448 .name = "vi_common", 1449 .early_init = vi_common_early_init, 1450 .late_init = NULL, 1451 .sw_init = vi_common_sw_init, 1452 .sw_fini = vi_common_sw_fini, 1453 .hw_init = vi_common_hw_init, 1454 .hw_fini = vi_common_hw_fini, 1455 .suspend = vi_common_suspend, 1456 .resume = vi_common_resume, 1457 .is_idle = vi_common_is_idle, 1458 .wait_for_idle = vi_common_wait_for_idle, 1459 .soft_reset = vi_common_soft_reset, 1460 .set_clockgating_state = vi_common_set_clockgating_state, 1461 .set_powergating_state = vi_common_set_powergating_state, 1462 }; 1463 1464