/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000

const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGAM",
	"VEGA10",
	"VEGA12",
	"VEGA20",
	"RAVEN",
	"ARCTURUS",
	"RENOIR",
	"NAVI10",
	"NAVI14",
	"NAVI12",
	"LAST",
};

/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as a sum of the NAKs generated and NAKs received.
 */

static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

	return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
		amdgpu_device_get_pcie_replay_count, NULL);
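/*
 * Illustrative only: the attribute above appears in the device's sysfs
 * directory, so the counter can be read from userspace with something
 * like the following (the card index is an assumption and varies per
 * system):
 *
 *   $ cat /sys/class/drm/card0/device/pcie_replay_count
 */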
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with HG/PX power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise return false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	return amdgpu_asic_supports_baco(adev);
}

/*
 * VRAM access helper functions.
 *
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size in bytes; @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       uint32_t *buf, size_t size, bool write)
{
	unsigned long flags;
	uint32_t hi = ~0;
	uint64_t last;

#ifdef CONFIG_64BIT
	last = min(pos + size, adev->gmc.visible_vram_size);
	if (last > pos) {
		void __iomem *addr = adev->mman.aper_base_kaddr + pos;
		size_t count = last - pos;

		if (write) {
			memcpy_toio(addr, buf, count);
			mb();
			amdgpu_asic_flush_hdp(adev, NULL);
		} else {
			amdgpu_asic_invalidate_hdp(adev, NULL);
			mb();
			memcpy_fromio(buf, addr, count);
		}

		if (count == size)
			return;

		pos += count;
		buf += count / 4;
		size -= count;
	}
#endif

	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
	for (last = pos + size; pos < last; pos += 4) {
		uint32_t tmp = pos >> 31;

		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
		if (tmp != hi) {
			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
			hi = tmp;
		}
		if (write)
			WREG32_NO_KIQ(mmMM_DATA, *buf++);
		else
			*buf++ = RREG32_NO_KIQ(mmMM_DATA);
	}
	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
}
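/*
 * A minimal usage sketch for the helper above: copy the first 16 bytes of
 * VRAM into a bounce buffer on the CPU side. The offset and size are
 * arbitrary example values.
 *
 *	uint32_t data[4];
 *
 *	amdgpu_device_vram_access(adev, 0, data, sizeof(data), false);
 */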
/*
 * MMIO register access helper functions.
 */
/**
 * amdgpu_mm_rreg - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if ((acc_flags & AMDGPU_REGS_KIQ) ||
	    (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
		return amdgpu_kiq_rreg(adev, reg);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

/*
 * MMIO register read with bytes helper function
 * @offset: byte offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (offset < adev->rmmio_size)
		return (readb(adev->rmmio + offset));
	BUG();
}

/*
 * MMIO register write with bytes helper function
 * @offset: byte offset from MMIO start
 * @value: the value to be written to the register
 */
/**
 * amdgpu_mm_wreg8 - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}

/**
 * amdgpu_mm_wreg - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if ((acc_flags & AMDGPU_REGS_KIQ) ||
	    (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
		return amdgpu_kiq_wreg(adev, reg, v);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}
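/*
 * These helpers usually sit behind the RREG32()/WREG32() macro family
 * rather than being called directly. A read-modify-write sketch, where
 * mmEXAMPLE_REG and EXAMPLE_BIT are placeholders, not real registers:
 *
 *	uint32_t tmp = RREG32(mmEXAMPLE_REG);
 *
 *	tmp |= EXAMPLE_BIT;
 *	WREG32(mmEXAMPLE_REG, tmp);
 */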
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

/**
 * amdgpu_io_wreg - write to an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}
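/*
 * Doorbells are how a ring tells its engine that the write pointer moved.
 * An illustrative submit-path sketch using the WDOORBELL64() wrapper
 * around the helper above (the exact wptr encoding is ring specific and
 * shown here only as an assumption):
 *
 *	ring->wptr += ndw;
 *	WDOORBELL64(ring->doorbell_index, ring->wptr);
 */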
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}
/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			if (adev->family >= AMDGPU_FAMILY_AI)
				tmp |= (or_mask & and_mask);
			else
				tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
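/*
 * An illustrative golden-register table for the helper above: entries are
 * {register, AND mask, OR mask} triples, so the array length must be a
 * multiple of three. The register and mask values here are placeholders.
 *
 *	static const u32 example_golden_settings[] = {
 *		mmEXAMPLE_REG, 0x0000ff00, 0x00001200,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *						ARRAY_SIZE(example_golden_settings));
 */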
/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helpers function.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	amdgpu_asic_init_doorbell_index(adev);

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     adev->doorbell_index.max_assignment+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	/* For Vega, reserve and map two pages on doorbell BAR since SDMA
	 * paging queue doorbell use the second page. The
	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
	 * doorbells are in the first page. So with paging queue enabled,
	 * num_doorbells needs to grow by one page (0x400 dwords).
	 */
	if (adev->asic_type >= CHIP_VEGA10)
		adev->doorbell.num_doorbells += 0x400;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}
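/*
 * A minimal writeback usage sketch: reserve a slot, read back the dword
 * the GPU last wrote there, then release the slot.
 *
 *	u32 wb, val;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		val = adev->wb.wb[wb];
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */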
/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after resizing we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
	struct pci_bus *root;
	struct resource *res;
	unsigned i;
	u16 cmd;
	int r;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* Check if the root bus has 64-bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}
/*
 * GPU helpers function.
 */
/**
 * amdgpu_device_need_post - check if the hw needs post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or post is needed if hw reset is performed.
 * Returns true if post is needed, false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* For FIJI: in the whole-GPU pass-through virtualization case,
		 * after a VM reboot some old smc fw still needs the driver to
		 * do a vPost, otherwise the gpu hangs. smc fw versions above
		 * 22.15 don't have this flaw, so we force a vPost for smc
		 * versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;
	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines the number of bits in the page table versus
 * the page directory: a page is 4KB, so we have 12 bits of offset, a
 * minimum of 9 bits in the page table, and the remaining bits are in the
 * page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}
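/*
 * Worked example for the math above: pages are 4KB, i.e. 12 offset bits,
 * so with the minimum amdgpu_vm_block_size of 9 each page table covers
 * 2^9 pages = 2^(9+12) bytes = 2MB per page-directory entry.
 */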
/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
	struct sysinfo si;
	bool is_os_64 = (sizeof(void *) == 8);
	uint64_t total_memory;
	uint64_t dram_size_seven_GB = 0x1B8000000;
	uint64_t dram_size_three_GB = 0xB8000000;

	if (amdgpu_smu_memory_pool_size == 0)
		return;

	if (!is_os_64) {
		DRM_WARN("Not 64-bit OS, feature not supported\n");
		goto def_value;
	}
	si_meminfo(&si);
	total_memory = (uint64_t)si.totalram * si.mem_unit;

	if ((amdgpu_smu_memory_pool_size == 1) ||
	    (amdgpu_smu_memory_pool_size == 2)) {
		if (total_memory < dram_size_three_GB)
			goto def_value1;
	} else if ((amdgpu_smu_memory_pool_size == 4) ||
		   (amdgpu_smu_memory_pool_size == 8)) {
		if (total_memory < dram_size_seven_GB)
			goto def_value1;
	} else {
		DRM_WARN("Smu memory pool size not supported\n");
		goto def_value;
	}
	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

	return;

def_value1:
	DRM_WARN("Not enough system memory\n");
def_value:
	adev->pm.smu_prv_buffer_size = 0;
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	amdgpu_device_check_smu_prv_buffer_size(adev);

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	return 0;
}
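/*
 * The checks above clamp the module parameters of the same names. An
 * illustrative invocation (the values are chosen only as an example):
 *
 *   $ modprobe amdgpu sched_jobs=32 gart_size=256 vm_fragment_size=9
 */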
/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asic before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	int r;

	if (amdgpu_device_supports_boco(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		r = pci_enable_device(dev->pdev);
		if (r)
			DRM_WARN("pci_enable_device failed (%d)\n", r);
		amdgpu_device_resume(dev, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true);
		pci_save_state(dev->pdev);
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3cold);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Checks if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}
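/*
 * Illustrative call for the helper above: gate clocks on all GFX IP
 * instances (a sketch, with error handling omitted):
 *
 *	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       AMD_CG_STATE_GATE);
 */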
/**
 * amdgpu_device_ip_set_powergating_state - set the PG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 *
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

/**
 * amdgpu_device_ip_get_clockgating_state - get the CG state
 *
 * @adev: amdgpu_device pointer
 * @flags: clockgating feature flags
 *
 * Walks the list of IPs on the device and updates the clockgating
 * flags for each IP.
 * Updates @flags with the feature flags for each hardware IP where
 * clockgating is enabled.
 */
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

/**
 * amdgpu_device_ip_wait_for_idle - wait for idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Waits for the requested hardware IP to be idle.
 * Returns 0 for success or a negative error code on failure.
 */
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;

}

/**
 * amdgpu_device_ip_is_idle - is the hardware IP idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
 */
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;

}
/**
 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 *
 * @adev: amdgpu_device pointer
 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
 */
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the IP block's version is equal to or greater than the
 * requested version; returns 1 if it is smaller or the IP block doesn't
 * exist.
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}

/**
 * amdgpu_device_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
		 ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}
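/*
 * Illustrative use of the comparison helper above: take a code path only
 * when the GFX IP block is at least version 9.0 (a sketch):
 *
 *	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *						9, 0)) {
 *		// GFX is at least 9.0 here
 *	}
 */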
/**
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display. This feature provides virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
 */
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}
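/*
 * Per the parsing above, virtual_display is a semicolon-separated list of
 * "pci-bus-address,crtc-count" entries, with "all" matching every device.
 * Illustrative invocations (the bus address is an example value):
 *
 *   $ modprobe amdgpu virtual_display=0000:03:00.0,2
 *   $ modprobe amdgpu virtual_display=all,1
 */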
/**
 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
 * the asic.
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	case CHIP_VEGA20:
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_RAVEN:
		if (adev->rev_id >= 8)
			chip_name = "raven2";
		else if (adev->pdev->device == 0x15d8)
			chip_name = "picasso";
		else
			chip_name = "raven";
		break;
	case CHIP_ARCTURUS:
		chip_name = "arcturus";
		break;
	case CHIP_RENOIR:
		chip_name = "renoir";
		break;
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	case CHIP_NAVI14:
		chip_name = "navi14";
		break;
	case CHIP_NAVI12:
		chip_name = "navi12";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
			goto parse_soc_bounding_box;

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		if (hdr->version_minor >= 1) {
			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			adev->gfx.config.num_sc_per_sh =
				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
			adev->gfx.config.num_packer_per_sc =
				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
		}

parse_soc_bounding_box:
		/*
		 * soc bounding box info is not integrated in discovery table,
		 * we always need to parse it from gpu info firmware.
		 */
		if (hdr->version_minor == 2) {
			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
		}
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}
/**
 * amdgpu_device_ip_early_init - run early init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Early initialization pass for hardware IPs. The hardware IPs that make
 * up each asic are discovered and each IP's early_init callback is run.
 * This is the first stage in initializing the asic.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
	case CHIP_RENOIR:
		if (adev->asic_type == CHIP_RAVEN ||
		    adev->asic_type == CHIP_RENOIR)
			adev->family = AMDGPU_FAMILY_RV;
		else
			adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		adev->family = AMDGPU_FAMILY_NV;

		r = nv_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	r = amdgpu_device_parse_gpu_info_fw(adev);
	if (r)
		return r;

	if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
		amdgpu_discovery_get_gfx_info(adev);

	amdgpu_amdkfd_device_probe(adev);

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return -EAGAIN;
	}

	adev->pm.pp_feature = amdgpu_pp_feature_mask;
	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
		/* get the vbios after the asic_funcs are set up */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
			/* Read BIOS */
			if (!amdgpu_get_bios(adev))
				return -EINVAL;

			r = amdgpu_atombios_init(adev);
			if (r) {
				dev_err(adev->dev, "amdgpu_atombios_init failed\n");
				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
				return r;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}
"amdgpu_atombios_init failed\n"); 1749 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); 1750 return r; 1751 } 1752 } 1753 } 1754 1755 adev->cg_flags &= amdgpu_cg_mask; 1756 adev->pg_flags &= amdgpu_pg_mask; 1757 1758 return 0; 1759 } 1760 1761 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev) 1762 { 1763 int i, r; 1764 1765 for (i = 0; i < adev->num_ip_blocks; i++) { 1766 if (!adev->ip_blocks[i].status.sw) 1767 continue; 1768 if (adev->ip_blocks[i].status.hw) 1769 continue; 1770 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 1771 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) || 1772 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { 1773 r = adev->ip_blocks[i].version->funcs->hw_init(adev); 1774 if (r) { 1775 DRM_ERROR("hw_init of IP block <%s> failed %d\n", 1776 adev->ip_blocks[i].version->funcs->name, r); 1777 return r; 1778 } 1779 adev->ip_blocks[i].status.hw = true; 1780 } 1781 } 1782 1783 return 0; 1784 } 1785 1786 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev) 1787 { 1788 int i, r; 1789 1790 for (i = 0; i < adev->num_ip_blocks; i++) { 1791 if (!adev->ip_blocks[i].status.sw) 1792 continue; 1793 if (adev->ip_blocks[i].status.hw) 1794 continue; 1795 r = adev->ip_blocks[i].version->funcs->hw_init(adev); 1796 if (r) { 1797 DRM_ERROR("hw_init of IP block <%s> failed %d\n", 1798 adev->ip_blocks[i].version->funcs->name, r); 1799 return r; 1800 } 1801 adev->ip_blocks[i].status.hw = true; 1802 } 1803 1804 return 0; 1805 } 1806 1807 static int amdgpu_device_fw_loading(struct amdgpu_device *adev) 1808 { 1809 int r = 0; 1810 int i; 1811 uint32_t smu_version; 1812 1813 if (adev->asic_type >= CHIP_VEGA10) { 1814 for (i = 0; i < adev->num_ip_blocks; i++) { 1815 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP) 1816 continue; 1817 1818 /* no need to do the fw loading again if already done*/ 1819 if (adev->ip_blocks[i].status.hw == true) 1820 break; 1821 1822 if (adev->in_gpu_reset || adev->in_suspend) { 1823 r = adev->ip_blocks[i].version->funcs->resume(adev); 1824 if (r) { 1825 DRM_ERROR("resume of IP block <%s> failed %d\n", 1826 adev->ip_blocks[i].version->funcs->name, r); 1827 return r; 1828 } 1829 } else { 1830 r = adev->ip_blocks[i].version->funcs->hw_init(adev); 1831 if (r) { 1832 DRM_ERROR("hw_init of IP block <%s> failed %d\n", 1833 adev->ip_blocks[i].version->funcs->name, r); 1834 return r; 1835 } 1836 } 1837 1838 adev->ip_blocks[i].status.hw = true; 1839 break; 1840 } 1841 } 1842 1843 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA) 1844 r = amdgpu_pm_load_smu_firmware(adev, &smu_version); 1845 1846 return r; 1847 } 1848 1849 /** 1850 * amdgpu_device_ip_init - run init for hardware IPs 1851 * 1852 * @adev: amdgpu_device pointer 1853 * 1854 * Main initialization pass for hardware IPs. The list of all the hardware 1855 * IPs that make up the asic is walked and the sw_init and hw_init callbacks 1856 * are run. sw_init initializes the software state associated with each IP 1857 * and hw_init initializes the hardware associated with each IP. 1858 * Returns 0 on success, negative error code on failure. 
/**
 * amdgpu_device_ip_init - run init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main initialization pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
 * are run. sw_init initializes the software state associated with each IP
 * and hw_init initializes the hardware associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
	int i, r;

	r = amdgpu_ras_init(adev);
	if (r)
		return r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			goto init_failed;
		}
		adev->ip_blocks[i].status.sw = true;

		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_device_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				goto init_failed;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				goto init_failed;
			}
			r = amdgpu_device_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
				goto init_failed;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
							       AMDGPU_GEM_DOMAIN_VRAM,
							       AMDGPU_CSA_SIZE);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					goto init_failed;
				}
			}
		}
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_init_data_exchange(adev);

	r = amdgpu_ib_pool_init(adev);
	if (r) {
		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
		goto init_failed;
	}

	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
	if (r)
		goto init_failed;

	r = amdgpu_device_ip_hw_init_phase1(adev);
	if (r)
		goto init_failed;

	r = amdgpu_device_fw_loading(adev);
	if (r)
		goto init_failed;

	r = amdgpu_device_ip_hw_init_phase2(adev);
	if (r)
		goto init_failed;

	/*
	 * Retired pages will be loaded from eeprom and reserved here.
	 * This should be called after amdgpu_device_ip_hw_init_phase2 since
	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
	 * functional for I2C communication, which is only true at this point.
	 * recovery_init may fail, but it can free all resources allocated by
	 * itself and its failure should not stop the amdgpu init process.
	 *
	 * Note: theoretically, this should be called before all vram allocations
	 * to protect retired pages from abuse.
	 */
	amdgpu_ras_recovery_init(adev);

	if (adev->gmc.xgmi.num_physical_nodes > 1)
		amdgpu_xgmi_add_device(adev);
	amdgpu_amdkfd_device_init(adev);

init_failed:
	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, true);

	return r;
}
 */
static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
{
	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}

/**
 * amdgpu_device_check_vram_lost - check if vram is valid
 *
 * @adev: amdgpu_device pointer
 *
 * Checks the reset magic value written to the gart pointer in VRAM.
 * The driver calls this after a GPU reset to see if the contents of
 * VRAM have been lost or not.
 * Returns true if vram is lost, false if not.
 */
static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
	return !!memcmp(adev->gart.ptr, adev->reset_magic,
			AMDGPU_RESET_MAGIC_NUM);
}

/**
 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
 *
 * @adev: amdgpu_device pointer
 * @state: clockgating state (gate or ungate)
 *
 * The list of all the hardware IPs that make up the asic is walked and the
 * set_clockgating_state callbacks are run. During the late init pass this
 * enables clockgating for the hardware IPs; during fini or suspend it
 * disables clockgating for the hardware IPs.
 * Returns 0 on success, negative error code on failure.
 */

static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
				      enum amd_clockgating_state state)
{
	int i, j, r;

	if (amdgpu_emu_mode == 1)
		return 0;

	for (j = 0; j < adev->num_ip_blocks; j++) {
		/* gate in list order, ungate in reverse order */
		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     state);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	return 0;
}

static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
{
	int i, j, r;

	if (amdgpu_emu_mode == 1)
		return 0;

	for (j = 0; j < adev->num_ip_blocks; j++) {
		/* gate in list order, ungate in reverse order */
		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		/* skip PG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
			/* enable powergating to save power */
			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
										     state);
			if (r) {
				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}
	return 0;
}

static int amdgpu_device_enable_mgpu_fan_boost(void)
{
	struct amdgpu_gpu_instance *gpu_ins;
	struct amdgpu_device *adev;
	int i, ret = 0;

	mutex_lock(&mgpu_info.mutex);

	/*
	 * The MGPU fan boost feature should be enabled
	 * only when there are two or more dGPUs in
	 * the system.
	 */
	if (mgpu_info.num_dgpu < 2)
		goto out;

	for (i = 0; i < mgpu_info.num_dgpu; i++) {
		gpu_ins = &(mgpu_info.gpu_ins[i]);
		adev = gpu_ins->adev;
		if (!(adev->flags & AMD_IS_APU) &&
		    !gpu_ins->mgpu_fan_enabled &&
		    adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
			if (ret)
				break;

			gpu_ins->mgpu_fan_enabled = 1;
		}
	}

out:
	mutex_unlock(&mgpu_info.mutex);

	return ret;
}

/**
 * amdgpu_device_ip_late_init - run late init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Late initialization pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the late_init callbacks are run.
 * late_init covers any special initialization that an IP requires
 * after all of the IPs have been initialized, or anything that needs to
 * happen late in the init process.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
		adev->ip_blocks[i].status.late_initialized = true;
	}

	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);

	amdgpu_device_fill_reset_magic(adev);

	r = amdgpu_device_enable_mgpu_fan_boost();
	if (r)
		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);


	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		mutex_lock(&mgpu_info.mutex);

		/*
		 * Reset device p-state to low as this was booted with high.
		 *
		 * This should be performed only after all devices from the same
		 * hive get initialized.
		 *
		 * However, we don't know in advance how many devices are in the
		 * hive;
		 * they are counted one by one as each device initializes.
		 *
		 * So we wait until all XGMI-linked devices have initialized.
		 * This may introduce some delay, as those devices may come
		 * from different hives, but that should be OK.
		 */
		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
			for (i = 0; i < mgpu_info.num_gpu; i++) {
				gpu_instance = &(mgpu_info.gpu_ins[i]);
				if (gpu_instance->adev->flags & AMD_IS_APU)
					continue;

				r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 0);
				if (r) {
					DRM_ERROR("pstate setting failed (%d).\n", r);
					break;
				}
			}
		}

		mutex_unlock(&mgpu_info.mutex);
	}

	return 0;
}

/**
 * amdgpu_device_ip_fini - run fini for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main teardown pass for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
 * are run. hw_fini tears down the hardware associated with each IP
 * and sw_fini tears down any software state associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_ras_pre_fini(adev);

	if (adev->gmc.xgmi.num_physical_nodes > 1)
		amdgpu_xgmi_remove_device(adev);

	amdgpu_amdkfd_device_fini(adev);

	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);

	/* need to disable SMC first */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
			/* XXX handle errors */
			if (r) {
				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
			adev->ip_blocks[i].status.hw = false;
			break;
		}
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.hw)
			continue;

		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}

		adev->ip_blocks[i].status.hw = false;
	}


	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.sw)
			continue;

		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_ucode_free_bo(adev);
			amdgpu_free_static_csa(&adev->virt.csa_obj);
			amdgpu_device_wb_fini(adev);
			amdgpu_device_vram_scratch_fini(adev);
			amdgpu_ib_pool_fini(adev);
		}

		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.sw = false;
		adev->ip_blocks[i].status.valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_fini)
			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
		adev->ip_blocks[i].status.late_initialized = false;
	}

amdgpu_ras_fini(adev); 2267 2268 if (amdgpu_sriov_vf(adev)) 2269 if (amdgpu_virt_release_full_gpu(adev, false)) 2270 DRM_ERROR("failed to release exclusive mode on fini\n"); 2271 2272 return 0; 2273 } 2274 2275 /** 2276 * amdgpu_device_delayed_init_work_handler - work handler for IB tests 2277 * 2278 * @work: work_struct. 2279 */ 2280 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work) 2281 { 2282 struct amdgpu_device *adev = 2283 container_of(work, struct amdgpu_device, delayed_init_work.work); 2284 int r; 2285 2286 r = amdgpu_ib_ring_tests(adev); 2287 if (r) 2288 DRM_ERROR("ib ring test failed (%d).\n", r); 2289 } 2290 2291 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work) 2292 { 2293 struct amdgpu_device *adev = 2294 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work); 2295 2296 mutex_lock(&adev->gfx.gfx_off_mutex); 2297 if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) { 2298 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true)) 2299 adev->gfx.gfx_off_state = true; 2300 } 2301 mutex_unlock(&adev->gfx.gfx_off_mutex); 2302 } 2303 2304 /** 2305 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1) 2306 * 2307 * @adev: amdgpu_device pointer 2308 * 2309 * Main suspend function for hardware IPs. The list of all the hardware 2310 * IPs that make up the asic is walked, clockgating is disabled and the 2311 * suspend callbacks are run. suspend puts the hardware and software state 2312 * in each IP into a state suitable for suspend. 2313 * Returns 0 on success, negative error code on failure. 2314 */ 2315 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) 2316 { 2317 int i, r; 2318 2319 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); 2320 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); 2321 2322 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2323 if (!adev->ip_blocks[i].status.valid) 2324 continue; 2325 /* displays are handled separately */ 2326 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) { 2327 /* XXX handle errors */ 2328 r = adev->ip_blocks[i].version->funcs->suspend(adev); 2329 /* XXX handle errors */ 2330 if (r) { 2331 DRM_ERROR("suspend of IP block <%s> failed %d\n", 2332 adev->ip_blocks[i].version->funcs->name, r); 2333 return r; 2334 } 2335 adev->ip_blocks[i].status.hw = false; 2336 } 2337 } 2338 2339 return 0; 2340 } 2341 2342 /** 2343 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2) 2344 * 2345 * @adev: amdgpu_device pointer 2346 * 2347 * Main suspend function for hardware IPs. The list of all the hardware 2348 * IPs that make up the asic is walked, clockgating is disabled and the 2349 * suspend callbacks are run. suspend puts the hardware and software state 2350 * in each IP into a state suitable for suspend. 2351 * Returns 0 on success, negative error code on failure. 
2352 */ 2353 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) 2354 { 2355 int i, r; 2356 2357 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2358 if (!adev->ip_blocks[i].status.valid) 2359 continue; 2360 /* displays are handled in phase1 */ 2361 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) 2362 continue; 2363 /* PSP lost connection when err_event_athub occurs */ 2364 if (amdgpu_ras_intr_triggered() && 2365 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { 2366 adev->ip_blocks[i].status.hw = false; 2367 continue; 2368 } 2369 /* XXX handle errors */ 2370 r = adev->ip_blocks[i].version->funcs->suspend(adev); 2371 /* XXX handle errors */ 2372 if (r) { 2373 DRM_ERROR("suspend of IP block <%s> failed %d\n", 2374 adev->ip_blocks[i].version->funcs->name, r); 2375 } 2376 adev->ip_blocks[i].status.hw = false; 2377 /* handle putting the SMC in the appropriate state */ 2378 if(!amdgpu_sriov_vf(adev)){ 2379 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { 2380 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state); 2381 if (r) { 2382 DRM_ERROR("SMC failed to set mp1 state %d, %d\n", 2383 adev->mp1_state, r); 2384 return r; 2385 } 2386 } 2387 } 2388 adev->ip_blocks[i].status.hw = false; 2389 } 2390 2391 return 0; 2392 } 2393 2394 /** 2395 * amdgpu_device_ip_suspend - run suspend for hardware IPs 2396 * 2397 * @adev: amdgpu_device pointer 2398 * 2399 * Main suspend function for hardware IPs. The list of all the hardware 2400 * IPs that make up the asic is walked, clockgating is disabled and the 2401 * suspend callbacks are run. suspend puts the hardware and software state 2402 * in each IP into a state suitable for suspend. 2403 * Returns 0 on success, negative error code on failure. 2404 */ 2405 int amdgpu_device_ip_suspend(struct amdgpu_device *adev) 2406 { 2407 int r; 2408 2409 if (amdgpu_sriov_vf(adev)) 2410 amdgpu_virt_request_full_gpu(adev, false); 2411 2412 r = amdgpu_device_ip_suspend_phase1(adev); 2413 if (r) 2414 return r; 2415 r = amdgpu_device_ip_suspend_phase2(adev); 2416 2417 if (amdgpu_sriov_vf(adev)) 2418 amdgpu_virt_release_full_gpu(adev, false); 2419 2420 return r; 2421 } 2422 2423 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) 2424 { 2425 int i, r; 2426 2427 static enum amd_ip_block_type ip_order[] = { 2428 AMD_IP_BLOCK_TYPE_GMC, 2429 AMD_IP_BLOCK_TYPE_COMMON, 2430 AMD_IP_BLOCK_TYPE_PSP, 2431 AMD_IP_BLOCK_TYPE_IH, 2432 }; 2433 2434 for (i = 0; i < ARRAY_SIZE(ip_order); i++) { 2435 int j; 2436 struct amdgpu_ip_block *block; 2437 2438 for (j = 0; j < adev->num_ip_blocks; j++) { 2439 block = &adev->ip_blocks[j]; 2440 2441 block->status.hw = false; 2442 if (block->version->type != ip_order[i] || 2443 !block->status.valid) 2444 continue; 2445 2446 r = block->version->funcs->hw_init(adev); 2447 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); 2448 if (r) 2449 return r; 2450 block->status.hw = true; 2451 } 2452 } 2453 2454 return 0; 2455 } 2456 2457 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) 2458 { 2459 int i, r; 2460 2461 static enum amd_ip_block_type ip_order[] = { 2462 AMD_IP_BLOCK_TYPE_SMC, 2463 AMD_IP_BLOCK_TYPE_DCE, 2464 AMD_IP_BLOCK_TYPE_GFX, 2465 AMD_IP_BLOCK_TYPE_SDMA, 2466 AMD_IP_BLOCK_TYPE_UVD, 2467 AMD_IP_BLOCK_TYPE_VCE, 2468 AMD_IP_BLOCK_TYPE_VCN 2469 }; 2470 2471 for (i = 0; i < ARRAY_SIZE(ip_order); i++) { 2472 int j; 2473 struct amdgpu_ip_block *block; 2474 2475 for (j = 0; j < adev->num_ip_blocks; j++) { 2476 
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid ||
			    block->status.hw)
				continue;

			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
				r = block->version->funcs->resume(adev);
			else
				r = block->version->funcs->hw_init(adev);

			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
			if (r)
				return r;
			block->status.hw = true;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * First resume function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the resume callbacks are run for
 * COMMON, GMC, and IH. resume puts the hardware into a functional state
 * after a suspend and updates the software state as necessary. This
 * function is also used for restoring the GPU after a GPU reset.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {

			r = adev->ip_blocks[i].version->funcs->resume(adev);
			if (r) {
				DRM_ERROR("resume of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Second resume function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the resume callbacks are run for
 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
 * functional state after a suspend and updates the software state as
 * necessary. This function is also used for restoring the GPU after a GPU
 * reset.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
			continue;
		r = adev->ip_blocks[i].version->funcs->resume(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}

/**
 * amdgpu_device_ip_resume - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main resume function for hardware IPs.
The hardware IPs
 * are split into two resume functions because they are
 * also used in recovering from a GPU reset, and some additional
 * steps need to be taken between them. In this case (S3/S4) they are
 * run sequentially.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_device_ip_resume_phase1(adev);
	if (r)
		return r;

	r = amdgpu_device_fw_loading(adev);
	if (r)
		return r;

	r = amdgpu_device_ip_resume_phase2(adev);

	return r;
}

/**
 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
 *
 * @adev: amdgpu_device pointer
 *
 * Query the VBIOS data tables to determine if the board supports SR-IOV.
 */
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		if (adev->is_atom_fw) {
			if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
		} else {
			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
		}

		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
	}
}

/**
 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
 *
 * @asic_type: AMD asic type
 *
 * Check if there is DC (new modesetting infrastructure) support for an asic.
 * Returns true if DC has support, false if not.
 */
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
{
	switch (asic_type) {
#if defined(CONFIG_DRM_AMD_DC)
	case CHIP_BONAIRE:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		/*
		 * We have systems in the wild with these ASICs that require
		 * LVDS and VGA support which is not supported with DC.
		 *
		 * Fallback to the non-DC driver here by default so as not to
		 * cause regressions.
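		 *
		 * As a usage illustration (a hypothetical command line, not
		 * taken from this file): booting with amdgpu.dc=1 explicitly
		 * opts one of these older ASICs into DC, which is why the
		 * check below is "amdgpu_dc > 0" rather than "!= 0".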
		 */
		return amdgpu_dc > 0;
	case CHIP_HAWAII:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_RENOIR:
#endif
		return amdgpu_dc != 0;
#endif
	default:
		if (amdgpu_dc > 0)
			DRM_INFO("Display Core has been requested via kernel parameter "
				 "but isn't supported by ASIC, ignoring\n");
		return false;
	}
}

/**
 * amdgpu_device_has_dc_support - check if dc is supported
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true for supported, false for not supported
 */
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return false;

	return amdgpu_device_asic_has_dc_support(adev->asic_type);
}


static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
{
	struct amdgpu_device *adev =
		container_of(__work, struct amdgpu_device, xgmi_reset_work);
	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);

	/* It's a bug to not have a hive within this function */
	if (WARN_ON(!hive))
		return;

	/*
	 * Use task barrier to synchronize all xgmi reset works across the
	 * hive. task_barrier_enter and task_barrier_exit will block
	 * until all the threads running the xgmi reset works reach
	 * those points. task_barrier_full will do both blocks.
	 */
	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {

		task_barrier_enter(&hive->tb);
		adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev);

		if (adev->asic_reset_res)
			goto fail;

		task_barrier_exit(&hive->tb);
		adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev);

		if (adev->asic_reset_res)
			goto fail;
	} else {

		task_barrier_full(&hive->tb);
		adev->asic_reset_res = amdgpu_asic_reset(adev);
	}

fail:
	if (adev->asic_reset_res)
		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
			 adev->asic_reset_res, adev->ddev->unique);
}

static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
{
	char *input = amdgpu_lockup_timeout;
	char *timeout_setting = NULL;
	int index = 0;
	long timeout;
	int ret = 0;

	/*
	 * By default the timeout for non-compute jobs is 10000 ms and no
	 * timeout is enforced on compute jobs. In SR-IOV or passthrough
	 * mode, the timeout for compute jobs defaults to 10000 ms as well.
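	 *
	 * For illustration only (a hypothetical module-parameter value, not
	 * taken from this file), the four comma-separated fields are parsed
	 * below in gfx,compute,sdma,video order:
	 *   amdgpu.lockup_timeout=10000,20000,10000,0
	 * would request 10 s gfx, 20 s compute and 10 s sdma timeouts and
	 * leave the video timeout at its default (a 0 field is skipped),
	 * while a negative field selects MAX_SCHEDULE_TIMEOUT.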
2746 */ 2747 adev->gfx_timeout = msecs_to_jiffies(10000); 2748 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; 2749 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) 2750 adev->compute_timeout = adev->gfx_timeout; 2751 else 2752 adev->compute_timeout = MAX_SCHEDULE_TIMEOUT; 2753 2754 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { 2755 while ((timeout_setting = strsep(&input, ",")) && 2756 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { 2757 ret = kstrtol(timeout_setting, 0, &timeout); 2758 if (ret) 2759 return ret; 2760 2761 if (timeout == 0) { 2762 index++; 2763 continue; 2764 } else if (timeout < 0) { 2765 timeout = MAX_SCHEDULE_TIMEOUT; 2766 } else { 2767 timeout = msecs_to_jiffies(timeout); 2768 } 2769 2770 switch (index++) { 2771 case 0: 2772 adev->gfx_timeout = timeout; 2773 break; 2774 case 1: 2775 adev->compute_timeout = timeout; 2776 break; 2777 case 2: 2778 adev->sdma_timeout = timeout; 2779 break; 2780 case 3: 2781 adev->video_timeout = timeout; 2782 break; 2783 default: 2784 break; 2785 } 2786 } 2787 /* 2788 * There is only one value specified and 2789 * it should apply to all non-compute jobs. 2790 */ 2791 if (index == 1) { 2792 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; 2793 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) 2794 adev->compute_timeout = adev->gfx_timeout; 2795 } 2796 } 2797 2798 return ret; 2799 } 2800 2801 /** 2802 * amdgpu_device_init - initialize the driver 2803 * 2804 * @adev: amdgpu_device pointer 2805 * @ddev: drm dev pointer 2806 * @pdev: pci dev pointer 2807 * @flags: driver flags 2808 * 2809 * Initializes the driver info and hw (all asics). 2810 * Returns 0 for success or an error on failure. 2811 * Called at driver startup. 2812 */ 2813 int amdgpu_device_init(struct amdgpu_device *adev, 2814 struct drm_device *ddev, 2815 struct pci_dev *pdev, 2816 uint32_t flags) 2817 { 2818 int r, i; 2819 bool boco = false; 2820 u32 max_MBps; 2821 2822 adev->shutdown = false; 2823 adev->dev = &pdev->dev; 2824 adev->ddev = ddev; 2825 adev->pdev = pdev; 2826 adev->flags = flags; 2827 2828 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST) 2829 adev->asic_type = amdgpu_force_asic_type; 2830 else 2831 adev->asic_type = flags & AMD_ASIC_MASK; 2832 2833 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; 2834 if (amdgpu_emu_mode == 1) 2835 adev->usec_timeout *= 2; 2836 adev->gmc.gart_size = 512 * 1024 * 1024; 2837 adev->accel_working = false; 2838 adev->num_rings = 0; 2839 adev->mman.buffer_funcs = NULL; 2840 adev->mman.buffer_funcs_ring = NULL; 2841 adev->vm_manager.vm_pte_funcs = NULL; 2842 adev->vm_manager.vm_pte_num_scheds = 0; 2843 adev->gmc.gmc_funcs = NULL; 2844 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); 2845 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); 2846 2847 adev->smc_rreg = &amdgpu_invalid_rreg; 2848 adev->smc_wreg = &amdgpu_invalid_wreg; 2849 adev->pcie_rreg = &amdgpu_invalid_rreg; 2850 adev->pcie_wreg = &amdgpu_invalid_wreg; 2851 adev->pciep_rreg = &amdgpu_invalid_rreg; 2852 adev->pciep_wreg = &amdgpu_invalid_wreg; 2853 adev->pcie_rreg64 = &amdgpu_invalid_rreg64; 2854 adev->pcie_wreg64 = &amdgpu_invalid_wreg64; 2855 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg; 2856 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; 2857 adev->didt_rreg = &amdgpu_invalid_rreg; 2858 adev->didt_wreg = &amdgpu_invalid_wreg; 2859 adev->gc_cac_rreg = &amdgpu_invalid_rreg; 2860 adev->gc_cac_wreg = &amdgpu_invalid_wreg; 2861 adev->audio_endpt_rreg = 
&amdgpu_block_invalid_rreg; 2862 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; 2863 2864 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", 2865 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, 2866 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); 2867 2868 /* mutex initialization are all done here so we 2869 * can recall function without having locking issues */ 2870 atomic_set(&adev->irq.ih.lock, 0); 2871 mutex_init(&adev->firmware.mutex); 2872 mutex_init(&adev->pm.mutex); 2873 mutex_init(&adev->gfx.gpu_clock_mutex); 2874 mutex_init(&adev->srbm_mutex); 2875 mutex_init(&adev->gfx.pipe_reserve_mutex); 2876 mutex_init(&adev->gfx.gfx_off_mutex); 2877 mutex_init(&adev->grbm_idx_mutex); 2878 mutex_init(&adev->mn_lock); 2879 mutex_init(&adev->virt.vf_errors.lock); 2880 hash_init(adev->mn_hash); 2881 mutex_init(&adev->lock_reset); 2882 mutex_init(&adev->psp.mutex); 2883 mutex_init(&adev->notifier_lock); 2884 2885 r = amdgpu_device_check_arguments(adev); 2886 if (r) 2887 return r; 2888 2889 spin_lock_init(&adev->mmio_idx_lock); 2890 spin_lock_init(&adev->smc_idx_lock); 2891 spin_lock_init(&adev->pcie_idx_lock); 2892 spin_lock_init(&adev->uvd_ctx_idx_lock); 2893 spin_lock_init(&adev->didt_idx_lock); 2894 spin_lock_init(&adev->gc_cac_idx_lock); 2895 spin_lock_init(&adev->se_cac_idx_lock); 2896 spin_lock_init(&adev->audio_endpt_idx_lock); 2897 spin_lock_init(&adev->mm_stats.lock); 2898 2899 INIT_LIST_HEAD(&adev->shadow_list); 2900 mutex_init(&adev->shadow_list_lock); 2901 2902 INIT_LIST_HEAD(&adev->ring_lru_list); 2903 spin_lock_init(&adev->ring_lru_list_lock); 2904 2905 INIT_DELAYED_WORK(&adev->delayed_init_work, 2906 amdgpu_device_delayed_init_work_handler); 2907 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work, 2908 amdgpu_device_delay_enable_gfx_off); 2909 2910 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func); 2911 2912 adev->gfx.gfx_off_req_count = 1; 2913 adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? 
true : false; 2914 2915 /* Registers mapping */ 2916 /* TODO: block userspace mapping of io register */ 2917 if (adev->asic_type >= CHIP_BONAIRE) { 2918 adev->rmmio_base = pci_resource_start(adev->pdev, 5); 2919 adev->rmmio_size = pci_resource_len(adev->pdev, 5); 2920 } else { 2921 adev->rmmio_base = pci_resource_start(adev->pdev, 2); 2922 adev->rmmio_size = pci_resource_len(adev->pdev, 2); 2923 } 2924 2925 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); 2926 if (adev->rmmio == NULL) { 2927 return -ENOMEM; 2928 } 2929 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); 2930 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); 2931 2932 /* io port mapping */ 2933 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 2934 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) { 2935 adev->rio_mem_size = pci_resource_len(adev->pdev, i); 2936 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size); 2937 break; 2938 } 2939 } 2940 if (adev->rio_mem == NULL) 2941 DRM_INFO("PCI I/O BAR is not found.\n"); 2942 2943 /* enable PCIE atomic ops */ 2944 r = pci_enable_atomic_ops_to_root(adev->pdev, 2945 PCI_EXP_DEVCAP2_ATOMIC_COMP32 | 2946 PCI_EXP_DEVCAP2_ATOMIC_COMP64); 2947 if (r) { 2948 adev->have_atomics_support = false; 2949 DRM_INFO("PCIE atomic ops is not supported\n"); 2950 } else { 2951 adev->have_atomics_support = true; 2952 } 2953 2954 amdgpu_device_get_pcie_info(adev); 2955 2956 if (amdgpu_mcbp) 2957 DRM_INFO("MCBP is enabled\n"); 2958 2959 if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10) 2960 adev->enable_mes = true; 2961 2962 if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) { 2963 r = amdgpu_discovery_init(adev); 2964 if (r) { 2965 dev_err(adev->dev, "amdgpu_discovery_init failed\n"); 2966 return r; 2967 } 2968 } 2969 2970 /* early init functions */ 2971 r = amdgpu_device_ip_early_init(adev); 2972 if (r) 2973 return r; 2974 2975 r = amdgpu_device_get_job_timeout_settings(adev); 2976 if (r) { 2977 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n"); 2978 return r; 2979 } 2980 2981 /* doorbell bar mapping and doorbell index init*/ 2982 amdgpu_device_doorbell_init(adev); 2983 2984 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ 2985 /* this will fail for cards that aren't VGA class devices, just 2986 * ignore it */ 2987 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode); 2988 2989 if (amdgpu_device_supports_boco(ddev)) 2990 boco = true; 2991 if (amdgpu_has_atpx() && 2992 (amdgpu_is_atpx_hybrid() || 2993 amdgpu_has_atpx_dgpu_power_cntl()) && 2994 !pci_is_thunderbolt_attached(adev->pdev)) 2995 vga_switcheroo_register_client(adev->pdev, 2996 &amdgpu_switcheroo_ops, boco); 2997 if (boco) 2998 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); 2999 3000 if (amdgpu_emu_mode == 1) { 3001 /* post the asic on emulation mode */ 3002 emu_soc_asic_init(adev); 3003 goto fence_driver_init; 3004 } 3005 3006 /* detect if we are with an SRIOV vbios */ 3007 amdgpu_device_detect_sriov_bios(adev); 3008 3009 /* check if we need to reset the asic 3010 * E.g., driver was not cleanly unloaded previously, etc. 
3011 */ 3012 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) { 3013 r = amdgpu_asic_reset(adev); 3014 if (r) { 3015 dev_err(adev->dev, "asic reset on init failed\n"); 3016 goto failed; 3017 } 3018 } 3019 3020 /* Post card if necessary */ 3021 if (amdgpu_device_need_post(adev)) { 3022 if (!adev->bios) { 3023 dev_err(adev->dev, "no vBIOS found\n"); 3024 r = -EINVAL; 3025 goto failed; 3026 } 3027 DRM_INFO("GPU posting now...\n"); 3028 r = amdgpu_atom_asic_init(adev->mode_info.atom_context); 3029 if (r) { 3030 dev_err(adev->dev, "gpu post error!\n"); 3031 goto failed; 3032 } 3033 } 3034 3035 if (adev->is_atom_fw) { 3036 /* Initialize clocks */ 3037 r = amdgpu_atomfirmware_get_clock_info(adev); 3038 if (r) { 3039 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n"); 3040 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); 3041 goto failed; 3042 } 3043 } else { 3044 /* Initialize clocks */ 3045 r = amdgpu_atombios_get_clock_info(adev); 3046 if (r) { 3047 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); 3048 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); 3049 goto failed; 3050 } 3051 /* init i2c buses */ 3052 if (!amdgpu_device_has_dc_support(adev)) 3053 amdgpu_atombios_i2c_init(adev); 3054 } 3055 3056 fence_driver_init: 3057 /* Fence driver */ 3058 r = amdgpu_fence_driver_init(adev); 3059 if (r) { 3060 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n"); 3061 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0); 3062 goto failed; 3063 } 3064 3065 /* init the mode config */ 3066 drm_mode_config_init(adev->ddev); 3067 3068 r = amdgpu_device_ip_init(adev); 3069 if (r) { 3070 /* failed in exclusive mode due to timeout */ 3071 if (amdgpu_sriov_vf(adev) && 3072 !amdgpu_sriov_runtime(adev) && 3073 amdgpu_virt_mmio_blocked(adev) && 3074 !amdgpu_virt_wait_reset(adev)) { 3075 dev_err(adev->dev, "VF exclusive mode timeout\n"); 3076 /* Don't send request since VF is inactive. */ 3077 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; 3078 adev->virt.ops = NULL; 3079 r = -EAGAIN; 3080 goto failed; 3081 } 3082 dev_err(adev->dev, "amdgpu_device_ip_init failed\n"); 3083 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); 3084 goto failed; 3085 } 3086 3087 DRM_DEBUG("SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n", 3088 adev->gfx.config.max_shader_engines, 3089 adev->gfx.config.max_sh_per_se, 3090 adev->gfx.config.max_cu_per_sh, 3091 adev->gfx.cu_info.number); 3092 3093 amdgpu_ctx_init_sched(adev); 3094 3095 adev->accel_working = true; 3096 3097 amdgpu_vm_check_compute_bug(adev); 3098 3099 /* Initialize the buffer migration limit. */ 3100 if (amdgpu_moverate >= 0) 3101 max_MBps = amdgpu_moverate; 3102 else 3103 max_MBps = 8; /* Allow 8 MB/s. */ 3104 /* Get a log2 for easy divisions. 
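	 * For example (worked numbers from the defaults above): with
	 * max_MBps = 8, ilog2(8) = 3, so later rate accounting can divide
	 * a byte count by the transfer rate with a cheap shift (x >> 3)
	 * instead of a 64-bit division.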
	 */
	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));

	amdgpu_fbdev_init(adev);

	r = amdgpu_pm_sysfs_init(adev);
	if (r) {
		adev->pm_sysfs_en = false;
		DRM_ERROR("registering pm debugfs failed (%d).\n", r);
	} else
		adev->pm_sysfs_en = true;

	r = amdgpu_ucode_sysfs_init(adev);
	if (r) {
		adev->ucode_sysfs_en = false;
		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
	} else
		adev->ucode_sysfs_en = true;

	r = amdgpu_debugfs_gem_init(adev);
	if (r)
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_regs_init(adev);
	if (r)
		DRM_ERROR("registering register debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_firmware_init(adev);
	if (r)
		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_init(adev);
	if (r)
		DRM_ERROR("Creating debugfs files failed (%d).\n", r);

	if ((amdgpu_testing & 1)) {
		if (adev->accel_working)
			amdgpu_test_moves(adev);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
	}
	if (amdgpu_benchmarking) {
		if (adev->accel_working)
			amdgpu_benchmark(adev, amdgpu_benchmarking);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
	}

	/*
	 * Register the gpu instance before amdgpu_device_enable_mgpu_fan_boost.
	 * Otherwise the mgpu fan boost feature will be skipped because the
	 * gpu instance count would still be too low.
	 */
	amdgpu_register_gpu_instance(adev);

	/* enable clockgating, etc. after ib tests, etc. since some blocks require
	 * explicit gating rather than handling it automatically.
	 */
	r = amdgpu_device_ip_late_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
		goto failed;
	}

	/* must succeed. */
	amdgpu_ras_resume(adev);

	queue_delayed_work(system_wq, &adev->delayed_init_work,
			   msecs_to_jiffies(AMDGPU_RESUME_MS));

	r = device_create_file(adev->dev, &dev_attr_pcie_replay_count);
	if (r) {
		dev_err(adev->dev, "Could not create pcie_replay_count\n");
		return r;
	}

	if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
		r = amdgpu_pmu_init(adev);
		if (r)
			dev_err(adev->dev, "amdgpu_pmu_init failed\n");
	}

	return 0;

failed:
	amdgpu_vf_error_trans_all(adev);
	if (boco)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);

	return r;
}

/**
 * amdgpu_device_fini - tear down the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void amdgpu_device_fini(struct amdgpu_device *adev)
{
	int r;

	DRM_INFO("amdgpu: finishing device.\n");
	flush_delayed_work(&adev->delayed_init_work);
	adev->shutdown = true;

	/* disable all interrupts */
	amdgpu_irq_disable_all(adev);
	if (adev->mode_info.mode_config_initialized) {
		if (!amdgpu_device_has_dc_support(adev))
			drm_helper_force_disable_all(adev->ddev);
		else
			drm_atomic_helper_shutdown(adev->ddev);
	}
	amdgpu_fence_driver_fini(adev);
	if (adev->pm_sysfs_en)
		amdgpu_pm_sysfs_fini(adev);
	amdgpu_fbdev_fini(adev);
	r = amdgpu_device_ip_fini(adev);
	if (adev->firmware.gpu_info_fw) {
		release_firmware(adev->firmware.gpu_info_fw);
		adev->firmware.gpu_info_fw = NULL;
	}
	adev->accel_working = false;
	/* free i2c buses */
	if (!amdgpu_device_has_dc_support(adev))
		amdgpu_i2c_fini(adev);

	if (amdgpu_emu_mode != 1)
		amdgpu_atombios_fini(adev);

	kfree(adev->bios);
	adev->bios = NULL;
	if (amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    !pci_is_thunderbolt_attached(adev->pdev))
		vga_switcheroo_unregister_client(adev->pdev);
	if (amdgpu_device_supports_boco(adev->ddev))
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	vga_client_register(adev->pdev, NULL, NULL, NULL);
	if (adev->rio_mem)
		pci_iounmap(adev->pdev, adev->rio_mem);
	adev->rio_mem = NULL;
	iounmap(adev->rmmio);
	adev->rmmio = NULL;
	amdgpu_device_doorbell_fini(adev);

	amdgpu_debugfs_regs_cleanup(adev);
	device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
	if (adev->ucode_sysfs_en)
		amdgpu_ucode_sysfs_fini(adev);
	if (IS_ENABLED(CONFIG_PERF_EVENTS))
		amdgpu_pmu_fini(adev);
	amdgpu_debugfs_preempt_cleanup(adev);
	if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
		amdgpu_discovery_fini(adev);
}


/*
 * Suspend & resume.
 */
/**
 * amdgpu_device_suspend - initiate device suspend
 *
 * @dev: drm dev pointer
 * @fbcon: notify the fbdev of suspend
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
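 *
 * A sketch of the sequence below (descriptive, not a stable contract):
 * display IPs suspend in phase 1, VRAM is evicted, the fence driver is
 * suspended, the remaining IPs suspend in phase 2, and VRAM is evicted a
 * second time so the GART page table moves out using the CPU.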
 */
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
{
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int r;

	if (dev == NULL || dev->dev_private == NULL)
		return -ENODEV;

	adev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	adev->in_suspend = true;
	drm_kms_helper_poll_disable(dev);

	if (fbcon)
		amdgpu_fbdev_set_suspend(adev, 1);

	cancel_delayed_work_sync(&adev->delayed_init_work);

	if (!amdgpu_device_has_dc_support(adev)) {
		/* turn off display hw */
		drm_modeset_lock_all(dev);
		drm_connector_list_iter_begin(dev, &iter);
		drm_for_each_connector_iter(connector, &iter)
			drm_helper_connector_dpms(connector,
						  DRM_MODE_DPMS_OFF);
		drm_connector_list_iter_end(&iter);
		drm_modeset_unlock_all(dev);
		/* unpin the front buffers and cursors */
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
			struct drm_framebuffer *fb = crtc->primary->fb;
			struct amdgpu_bo *robj;

			if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
				r = amdgpu_bo_reserve(aobj, true);
				if (r == 0) {
					amdgpu_bo_unpin(aobj);
					amdgpu_bo_unreserve(aobj);
				}
			}

			if (fb == NULL || fb->obj[0] == NULL)
				continue;

			robj = gem_to_amdgpu_bo(fb->obj[0]);
			/* don't unpin kernel fb objects */
			if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
				r = amdgpu_bo_reserve(robj, true);
				if (r == 0) {
					amdgpu_bo_unpin(robj);
					amdgpu_bo_unreserve(robj);
				}
			}
		}
	}

	amdgpu_amdkfd_suspend(adev, !fbcon);

	amdgpu_ras_suspend(adev);

	r = amdgpu_device_ip_suspend_phase1(adev);

	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);

	amdgpu_fence_driver_suspend(adev);

	r = amdgpu_device_ip_suspend_phase2(adev);

	/* evict remaining vram memory
	 * This second call to evict vram is to evict the gart page table
	 * using the CPU.
	 */
	amdgpu_bo_evict_vram(adev);

	return 0;
}

/**
 * amdgpu_device_resume - initiate device resume
 *
 * @dev: drm dev pointer
 * @fbcon: notify the fbdev of resume
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
3376 */ 3377 int amdgpu_device_resume(struct drm_device *dev, bool fbcon) 3378 { 3379 struct drm_connector *connector; 3380 struct drm_connector_list_iter iter; 3381 struct amdgpu_device *adev = dev->dev_private; 3382 struct drm_crtc *crtc; 3383 int r = 0; 3384 3385 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 3386 return 0; 3387 3388 /* post card */ 3389 if (amdgpu_device_need_post(adev)) { 3390 r = amdgpu_atom_asic_init(adev->mode_info.atom_context); 3391 if (r) 3392 DRM_ERROR("amdgpu asic init failed\n"); 3393 } 3394 3395 r = amdgpu_device_ip_resume(adev); 3396 if (r) { 3397 DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r); 3398 return r; 3399 } 3400 amdgpu_fence_driver_resume(adev); 3401 3402 3403 r = amdgpu_device_ip_late_init(adev); 3404 if (r) 3405 return r; 3406 3407 queue_delayed_work(system_wq, &adev->delayed_init_work, 3408 msecs_to_jiffies(AMDGPU_RESUME_MS)); 3409 3410 if (!amdgpu_device_has_dc_support(adev)) { 3411 /* pin cursors */ 3412 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 3413 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 3414 3415 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { 3416 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 3417 r = amdgpu_bo_reserve(aobj, true); 3418 if (r == 0) { 3419 r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); 3420 if (r != 0) 3421 DRM_ERROR("Failed to pin cursor BO (%d)\n", r); 3422 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj); 3423 amdgpu_bo_unreserve(aobj); 3424 } 3425 } 3426 } 3427 } 3428 r = amdgpu_amdkfd_resume(adev, !fbcon); 3429 if (r) 3430 return r; 3431 3432 /* Make sure IB tests flushed */ 3433 flush_delayed_work(&adev->delayed_init_work); 3434 3435 /* blat the mode back in */ 3436 if (fbcon) { 3437 if (!amdgpu_device_has_dc_support(adev)) { 3438 /* pre DCE11 */ 3439 drm_helper_resume_force_mode(dev); 3440 3441 /* turn on display hw */ 3442 drm_modeset_lock_all(dev); 3443 3444 drm_connector_list_iter_begin(dev, &iter); 3445 drm_for_each_connector_iter(connector, &iter) 3446 drm_helper_connector_dpms(connector, 3447 DRM_MODE_DPMS_ON); 3448 drm_connector_list_iter_end(&iter); 3449 3450 drm_modeset_unlock_all(dev); 3451 } 3452 amdgpu_fbdev_set_suspend(adev, 0); 3453 } 3454 3455 drm_kms_helper_poll_enable(dev); 3456 3457 amdgpu_ras_resume(adev); 3458 3459 /* 3460 * Most of the connector probing functions try to acquire runtime pm 3461 * refs to ensure that the GPU is powered on when connector polling is 3462 * performed. Since we're calling this from a runtime PM callback, 3463 * trying to acquire rpm refs will cause us to deadlock. 3464 * 3465 * Since we're guaranteed to be holding the rpm lock, it's safe to 3466 * temporarily disable the rpm helpers so this doesn't deadlock us. 3467 */ 3468 #ifdef CONFIG_PM 3469 dev->dev->power.disable_depth++; 3470 #endif 3471 if (!amdgpu_device_has_dc_support(adev)) 3472 drm_helper_hpd_irq_event(dev); 3473 else 3474 drm_kms_helper_hotplug_event(dev); 3475 #ifdef CONFIG_PM 3476 dev->dev->power.disable_depth--; 3477 #endif 3478 adev->in_suspend = false; 3479 3480 return 0; 3481 } 3482 3483 /** 3484 * amdgpu_device_ip_check_soft_reset - did soft reset succeed 3485 * 3486 * @adev: amdgpu_device pointer 3487 * 3488 * The list of all the hardware IPs that make up the asic is walked and 3489 * the check_soft_reset callbacks are run. check_soft_reset determines 3490 * if the asic is still hung or not. 3491 * Returns true if any of the IPs are still in a hung state, false if not. 
3492 */ 3493 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev) 3494 { 3495 int i; 3496 bool asic_hang = false; 3497 3498 if (amdgpu_sriov_vf(adev)) 3499 return true; 3500 3501 if (amdgpu_asic_need_full_reset(adev)) 3502 return true; 3503 3504 for (i = 0; i < adev->num_ip_blocks; i++) { 3505 if (!adev->ip_blocks[i].status.valid) 3506 continue; 3507 if (adev->ip_blocks[i].version->funcs->check_soft_reset) 3508 adev->ip_blocks[i].status.hang = 3509 adev->ip_blocks[i].version->funcs->check_soft_reset(adev); 3510 if (adev->ip_blocks[i].status.hang) { 3511 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); 3512 asic_hang = true; 3513 } 3514 } 3515 return asic_hang; 3516 } 3517 3518 /** 3519 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset 3520 * 3521 * @adev: amdgpu_device pointer 3522 * 3523 * The list of all the hardware IPs that make up the asic is walked and the 3524 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset 3525 * handles any IP specific hardware or software state changes that are 3526 * necessary for a soft reset to succeed. 3527 * Returns 0 on success, negative error code on failure. 3528 */ 3529 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev) 3530 { 3531 int i, r = 0; 3532 3533 for (i = 0; i < adev->num_ip_blocks; i++) { 3534 if (!adev->ip_blocks[i].status.valid) 3535 continue; 3536 if (adev->ip_blocks[i].status.hang && 3537 adev->ip_blocks[i].version->funcs->pre_soft_reset) { 3538 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev); 3539 if (r) 3540 return r; 3541 } 3542 } 3543 3544 return 0; 3545 } 3546 3547 /** 3548 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed 3549 * 3550 * @adev: amdgpu_device pointer 3551 * 3552 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu 3553 * reset is necessary to recover. 3554 * Returns true if a full asic reset is required, false if not. 3555 */ 3556 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev) 3557 { 3558 int i; 3559 3560 if (amdgpu_asic_need_full_reset(adev)) 3561 return true; 3562 3563 for (i = 0; i < adev->num_ip_blocks; i++) { 3564 if (!adev->ip_blocks[i].status.valid) 3565 continue; 3566 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) || 3567 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) || 3568 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) || 3569 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) || 3570 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { 3571 if (adev->ip_blocks[i].status.hang) { 3572 DRM_INFO("Some block need full reset!\n"); 3573 return true; 3574 } 3575 } 3576 } 3577 return false; 3578 } 3579 3580 /** 3581 * amdgpu_device_ip_soft_reset - do a soft reset 3582 * 3583 * @adev: amdgpu_device pointer 3584 * 3585 * The list of all the hardware IPs that make up the asic is walked and the 3586 * soft_reset callbacks are run if the block is hung. soft_reset handles any 3587 * IP specific hardware or software state changes that are necessary to soft 3588 * reset the IP. 3589 * Returns 0 on success, negative error code on failure. 
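 *
 * As a sketch of how the reset path below uses these hooks (descriptive,
 * not a stable contract): amdgpu_device_pre_asic_reset() runs
 * pre_soft_reset, then soft_reset, then post_soft_reset, and falls back
 * to a full reset if check_soft_reset still reports a hang afterwards.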
3590 */ 3591 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev) 3592 { 3593 int i, r = 0; 3594 3595 for (i = 0; i < adev->num_ip_blocks; i++) { 3596 if (!adev->ip_blocks[i].status.valid) 3597 continue; 3598 if (adev->ip_blocks[i].status.hang && 3599 adev->ip_blocks[i].version->funcs->soft_reset) { 3600 r = adev->ip_blocks[i].version->funcs->soft_reset(adev); 3601 if (r) 3602 return r; 3603 } 3604 } 3605 3606 return 0; 3607 } 3608 3609 /** 3610 * amdgpu_device_ip_post_soft_reset - clean up from soft reset 3611 * 3612 * @adev: amdgpu_device pointer 3613 * 3614 * The list of all the hardware IPs that make up the asic is walked and the 3615 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset 3616 * handles any IP specific hardware or software state changes that are 3617 * necessary after the IP has been soft reset. 3618 * Returns 0 on success, negative error code on failure. 3619 */ 3620 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev) 3621 { 3622 int i, r = 0; 3623 3624 for (i = 0; i < adev->num_ip_blocks; i++) { 3625 if (!adev->ip_blocks[i].status.valid) 3626 continue; 3627 if (adev->ip_blocks[i].status.hang && 3628 adev->ip_blocks[i].version->funcs->post_soft_reset) 3629 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev); 3630 if (r) 3631 return r; 3632 } 3633 3634 return 0; 3635 } 3636 3637 /** 3638 * amdgpu_device_recover_vram - Recover some VRAM contents 3639 * 3640 * @adev: amdgpu_device pointer 3641 * 3642 * Restores the contents of VRAM buffers from the shadows in GTT. Used to 3643 * restore things like GPUVM page tables after a GPU reset where 3644 * the contents of VRAM might be lost. 3645 * 3646 * Returns: 3647 * 0 on success, negative error code on failure. 3648 */ 3649 static int amdgpu_device_recover_vram(struct amdgpu_device *adev) 3650 { 3651 struct dma_fence *fence = NULL, *next = NULL; 3652 struct amdgpu_bo *shadow; 3653 long r = 1, tmo; 3654 3655 if (amdgpu_sriov_runtime(adev)) 3656 tmo = msecs_to_jiffies(8000); 3657 else 3658 tmo = msecs_to_jiffies(100); 3659 3660 DRM_INFO("recover vram bo from shadow start\n"); 3661 mutex_lock(&adev->shadow_list_lock); 3662 list_for_each_entry(shadow, &adev->shadow_list, shadow_list) { 3663 3664 /* No need to recover an evicted BO */ 3665 if (shadow->tbo.mem.mem_type != TTM_PL_TT || 3666 shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET || 3667 shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM) 3668 continue; 3669 3670 r = amdgpu_bo_restore_shadow(shadow, &next); 3671 if (r) 3672 break; 3673 3674 if (fence) { 3675 tmo = dma_fence_wait_timeout(fence, false, tmo); 3676 dma_fence_put(fence); 3677 fence = next; 3678 if (tmo == 0) { 3679 r = -ETIMEDOUT; 3680 break; 3681 } else if (tmo < 0) { 3682 r = tmo; 3683 break; 3684 } 3685 } else { 3686 fence = next; 3687 } 3688 } 3689 mutex_unlock(&adev->shadow_list_lock); 3690 3691 if (fence) 3692 tmo = dma_fence_wait_timeout(fence, false, tmo); 3693 dma_fence_put(fence); 3694 3695 if (r < 0 || tmo <= 0) { 3696 DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo); 3697 return -EIO; 3698 } 3699 3700 DRM_INFO("recover vram bo from shadow done\n"); 3701 return 0; 3702 } 3703 3704 3705 /** 3706 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf 3707 * 3708 * @adev: amdgpu device pointer 3709 * @from_hypervisor: request from hypervisor 3710 * 3711 * do VF FLR and reinitialize Asic 3712 * return 0 means succeeded otherwise failed 3713 */ 3714 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, 
3715 bool from_hypervisor) 3716 { 3717 int r; 3718 3719 if (from_hypervisor) 3720 r = amdgpu_virt_request_full_gpu(adev, true); 3721 else 3722 r = amdgpu_virt_reset_gpu(adev); 3723 if (r) 3724 return r; 3725 3726 /* Resume IP prior to SMC */ 3727 r = amdgpu_device_ip_reinit_early_sriov(adev); 3728 if (r) 3729 goto error; 3730 3731 amdgpu_virt_init_data_exchange(adev); 3732 /* we need recover gart prior to run SMC/CP/SDMA resume */ 3733 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]); 3734 3735 r = amdgpu_device_fw_loading(adev); 3736 if (r) 3737 return r; 3738 3739 /* now we are okay to resume SMC/CP/SDMA */ 3740 r = amdgpu_device_ip_reinit_late_sriov(adev); 3741 if (r) 3742 goto error; 3743 3744 amdgpu_irq_gpu_reset_resume_helper(adev); 3745 r = amdgpu_ib_ring_tests(adev); 3746 amdgpu_amdkfd_post_reset(adev); 3747 3748 error: 3749 amdgpu_virt_release_full_gpu(adev, true); 3750 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { 3751 amdgpu_inc_vram_lost(adev); 3752 r = amdgpu_device_recover_vram(adev); 3753 } 3754 3755 return r; 3756 } 3757 3758 /** 3759 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery 3760 * 3761 * @adev: amdgpu device pointer 3762 * 3763 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover 3764 * a hung GPU. 3765 */ 3766 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) 3767 { 3768 if (!amdgpu_device_ip_check_soft_reset(adev)) { 3769 DRM_INFO("Timeout, but no hardware hang detected.\n"); 3770 return false; 3771 } 3772 3773 if (amdgpu_gpu_recovery == 0) 3774 goto disabled; 3775 3776 if (amdgpu_sriov_vf(adev)) 3777 return true; 3778 3779 if (amdgpu_gpu_recovery == -1) { 3780 switch (adev->asic_type) { 3781 case CHIP_BONAIRE: 3782 case CHIP_HAWAII: 3783 case CHIP_TOPAZ: 3784 case CHIP_TONGA: 3785 case CHIP_FIJI: 3786 case CHIP_POLARIS10: 3787 case CHIP_POLARIS11: 3788 case CHIP_POLARIS12: 3789 case CHIP_VEGAM: 3790 case CHIP_VEGA20: 3791 case CHIP_VEGA10: 3792 case CHIP_VEGA12: 3793 case CHIP_RAVEN: 3794 case CHIP_ARCTURUS: 3795 case CHIP_RENOIR: 3796 case CHIP_NAVI10: 3797 case CHIP_NAVI14: 3798 case CHIP_NAVI12: 3799 break; 3800 default: 3801 goto disabled; 3802 } 3803 } 3804 3805 return true; 3806 3807 disabled: 3808 DRM_INFO("GPU recovery disabled.\n"); 3809 return false; 3810 } 3811 3812 3813 static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, 3814 struct amdgpu_job *job, 3815 bool *need_full_reset_arg) 3816 { 3817 int i, r = 0; 3818 bool need_full_reset = *need_full_reset_arg; 3819 3820 /* block all schedulers and reset given job's ring */ 3821 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 3822 struct amdgpu_ring *ring = adev->rings[i]; 3823 3824 if (!ring || !ring->sched.thread) 3825 continue; 3826 3827 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ 3828 amdgpu_fence_driver_force_completion(ring); 3829 } 3830 3831 if(job) 3832 drm_sched_increase_karma(&job->base); 3833 3834 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */ 3835 if (!amdgpu_sriov_vf(adev)) { 3836 3837 if (!need_full_reset) 3838 need_full_reset = amdgpu_device_ip_need_full_reset(adev); 3839 3840 if (!need_full_reset) { 3841 amdgpu_device_ip_pre_soft_reset(adev); 3842 r = amdgpu_device_ip_soft_reset(adev); 3843 amdgpu_device_ip_post_soft_reset(adev); 3844 if (r || amdgpu_device_ip_check_soft_reset(adev)) { 3845 DRM_INFO("soft reset failed, will fallback to full reset!\n"); 3846 need_full_reset = true; 3847 } 3848 } 3849 3850 if 

static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
					struct amdgpu_job *job,
					bool *need_full_reset_arg)
{
	int i, r = 0;
	bool need_full_reset = *need_full_reset_arg;

	/* block all schedulers and reset given job's ring */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
		amdgpu_fence_driver_force_completion(ring);
	}

	if (job)
		drm_sched_increase_karma(&job->base);

	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
	if (!amdgpu_sriov_vf(adev)) {

		if (!need_full_reset)
			need_full_reset = amdgpu_device_ip_need_full_reset(adev);

		if (!need_full_reset) {
			amdgpu_device_ip_pre_soft_reset(adev);
			r = amdgpu_device_ip_soft_reset(adev);
			amdgpu_device_ip_post_soft_reset(adev);
			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
				DRM_INFO("soft reset failed, falling back to full reset!\n");
				need_full_reset = true;
			}
		}

		if (need_full_reset)
			r = amdgpu_device_ip_suspend(adev);

		*need_full_reset_arg = need_full_reset;
	}

	return r;
}
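
/*
 * need_full_reset_arg is an in/out parameter: a caller seeds it (usually
 * false), amdgpu_device_pre_asic_reset() upgrades it when the soft reset
 * fails or an IP demands a full reset, and the updated value then steers
 * amdgpu_do_asic_reset() below. A minimal sketch of that contract with a
 * hypothetical caller; this is not driver code.
 */
#if 0
static int example_reset_flow(struct amdgpu_device *adev,
			      struct amdgpu_job *job,
			      struct list_head *devices)
{
	bool need_full_reset = false;
	int r;

	r = amdgpu_device_pre_asic_reset(adev, job, &need_full_reset);
	if (r)
		return r;

	/* need_full_reset may have been set to true by the call above */
	return amdgpu_do_asic_reset(NULL, devices, &need_full_reset);
}
#endif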

static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
				struct list_head *device_list_handle,
				bool *need_full_reset_arg)
{
	struct amdgpu_device *tmp_adev = NULL;
	bool need_full_reset = *need_full_reset_arg, vram_lost = false;
	int r = 0;

	/*
	 * ASIC reset has to be done on all XGMI hive nodes ASAP
	 * to allow proper links negotiation in FW (within 1 sec)
	 */
	if (need_full_reset) {
		list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
			/* For XGMI run all resets in parallel to speed up the process */
			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
					r = -EALREADY;
			} else
				r = amdgpu_asic_reset(tmp_adev);

			if (r) {
				DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
					  r, tmp_adev->ddev->unique);
				break;
			}
		}

		/* For XGMI wait for all resets to complete before proceeding */
		if (!r) {
			list_for_each_entry(tmp_adev, device_list_handle,
					    gmc.xgmi.head) {
				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
					flush_work(&tmp_adev->xgmi_reset_work);
					r = tmp_adev->asic_reset_res;
					if (r)
						break;
				}
			}
		}
	}

	if (!r && amdgpu_ras_intr_triggered())
		amdgpu_ras_intr_cleared();

	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
		if (need_full_reset) {
			/* post card */
			if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
				DRM_WARN("asic atom init failed!");

			if (!r) {
				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
				r = amdgpu_device_ip_resume_phase1(tmp_adev);
				if (r)
					goto out;

				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
				if (vram_lost) {
					DRM_INFO("VRAM is lost due to GPU reset!\n");
					amdgpu_inc_vram_lost(tmp_adev);
				}

				r = amdgpu_gtt_mgr_recover(
					&tmp_adev->mman.bdev.man[TTM_PL_TT]);
				if (r)
					goto out;

				r = amdgpu_device_fw_loading(tmp_adev);
				if (r)
					return r;

				r = amdgpu_device_ip_resume_phase2(tmp_adev);
				if (r)
					goto out;

				if (vram_lost)
					amdgpu_device_fill_reset_magic(tmp_adev);

				/*
				 * Add this ASIC back as tracked now that the
				 * reset has completed successfully.
				 */
				amdgpu_register_gpu_instance(tmp_adev);

				r = amdgpu_device_ip_late_init(tmp_adev);
				if (r)
					goto out;

				/* must succeed. */
				amdgpu_ras_resume(tmp_adev);

				/* Update PSP FW topology after reset */
				if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
					r = amdgpu_xgmi_update_topology(hive, tmp_adev);
			}
		}

out:
		if (!r) {
			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
			r = amdgpu_ib_ring_tests(tmp_adev);
			if (r) {
				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
				r = amdgpu_device_ip_suspend(tmp_adev);
				need_full_reset = true;
				r = -EAGAIN;
				goto end;
			}
		}

		if (!r)
			r = amdgpu_device_recover_vram(tmp_adev);
		else
			tmp_adev->asic_reset_res = r;
	}

end:
	*need_full_reset_arg = need_full_reset;
	return r;
}

static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
{
	if (trylock) {
		if (!mutex_trylock(&adev->lock_reset))
			return false;
	} else
		mutex_lock(&adev->lock_reset);

	atomic_inc(&adev->gpu_reset_counter);
	adev->in_gpu_reset = true;
	switch (amdgpu_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_MODE1:
		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
		break;
	case AMD_RESET_METHOD_MODE2:
		adev->mp1_state = PP_MP1_STATE_RESET;
		break;
	default:
		adev->mp1_state = PP_MP1_STATE_NONE;
		break;
	}

	return true;
}

static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
{
	amdgpu_vf_error_trans_all(adev);
	adev->mp1_state = PP_MP1_STATE_NONE;
	adev->in_gpu_reset = false;
	mutex_unlock(&adev->lock_reset);
}
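
/*
 * The trylock path above is what lets concurrent timeout handlers on the
 * same device back off instead of queueing behind an in-flight reset; XGMI
 * callers take hive->reset_lock first and then lock each node
 * unconditionally. A condensed sketch of the pattern under those
 * assumptions, with a hypothetical caller:
 */
#if 0
static int example_guarded_reset(struct amdgpu_device *adev, bool is_hive)
{
	/* hive members already hold hive->reset_lock, so block; else trylock */
	if (!amdgpu_device_lock_adev(adev, !is_hive))
		return 0;	/* another reset is already in progress */

	/* ... reset work goes here ... */

	amdgpu_device_unlock_adev(adev);
	return 0;
}
#endif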

/**
 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
 *
 * @adev: amdgpu_device pointer
 * @job: which job triggered the hang
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Attempts a soft reset or full reset and reinitializes the ASIC.
 * Returns 0 for success or an error on failure.
 */

int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job)
{
	struct list_head device_list, *device_list_handle = NULL;
	bool need_full_reset, job_signaled;
	struct amdgpu_hive_info *hive = NULL;
	struct amdgpu_device *tmp_adev = NULL;
	int i, r = 0;
	bool in_ras_intr = amdgpu_ras_intr_triggered();
	bool use_baco =
		(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO);

	/*
	 * Flush RAM to disk so that after reboot
	 * the user can read the log and see why the system rebooted.
	 */
	if (in_ras_intr && !use_baco && amdgpu_ras_get_context(adev)->reboot) {

		DRM_WARN("Emergency reboot.");

		ksys_sync_helper();
		emergency_restart();
	}

	need_full_reset = job_signaled = false;
	INIT_LIST_HEAD(&device_list);

	dev_info(adev->dev, "GPU %s begin!\n",
		 (in_ras_intr && !use_baco) ? "jobs stop" : "reset");

	cancel_delayed_work_sync(&adev->delayed_init_work);

	hive = amdgpu_get_xgmi_hive(adev, false);

	/*
	 * Here we trylock to avoid a chain of resets executing from either a
	 * trigger by jobs on different adevs in the XGMI hive or jobs on
	 * different schedulers for the same device while this TO handler is
	 * running. We always reset all schedulers for a device and all
	 * devices for an XGMI hive, so that should take care of them too.
	 */

	if (hive && !mutex_trylock(&hive->reset_lock)) {
		DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
			 job ? job->base.id : -1, hive->hive_id);
		return 0;
	}

	/* Start with adev pre asic reset first for soft reset check. */
	if (!amdgpu_device_lock_adev(adev, !hive)) {
		DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
			 job ? job->base.id : -1);
		return 0;
	}

	/* Block kfd: SRIOV would do it separately */
	if (!amdgpu_sriov_vf(adev))
		amdgpu_amdkfd_pre_reset(adev);

	/* Build list of devices to reset */
	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		if (!hive) {
			/* unlock kfd: SRIOV would do it separately */
			if (!amdgpu_sriov_vf(adev))
				amdgpu_amdkfd_post_reset(adev);
			amdgpu_device_unlock_adev(adev);
			return -ENODEV;
		}

		/*
		 * In case we are in XGMI hive mode device reset is done for all the
		 * nodes in the hive to retrain all XGMI links and hence the reset
		 * sequence is executed in loop on all nodes.
		 */
		device_list_handle = &hive->device_list;
	} else {
		list_add_tail(&adev->gmc.xgmi.head, &device_list);
		device_list_handle = &device_list;
	}

	/* block all schedulers and reset given job's ring */
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
		if (tmp_adev != adev) {
			amdgpu_device_lock_adev(tmp_adev, false);
			if (!amdgpu_sriov_vf(tmp_adev))
				amdgpu_amdkfd_pre_reset(tmp_adev);
		}

		/*
		 * Mark these ASICs to be reset as untracked first,
		 * and add them back after the reset has completed.
		 */
		amdgpu_unregister_gpu_instance(tmp_adev);

		/* disable ras on ALL IPs */
		if (!(in_ras_intr && !use_baco) &&
		    amdgpu_device_ip_need_full_reset(tmp_adev))
			amdgpu_ras_suspend(tmp_adev);

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = tmp_adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			drm_sched_stop(&ring->sched, job ? &job->base : NULL);

			if (in_ras_intr && !use_baco)
				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
		}
	}


	if (in_ras_intr && !use_baco)
		goto skip_sched_resume;

	/*
	 * Must check guilty signal here since after this point all old
	 * HW fences are force signaled.
	 *
	 * job->base holds a reference to parent fence
	 */
	if (job && job->base.s_fence->parent &&
	    dma_fence_is_signaled(job->base.s_fence->parent))
		job_signaled = true;

	if (job_signaled) {
		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
		goto skip_hw_reset;
	}


	/* The guilty job will be freed after this */
	r = amdgpu_device_pre_asic_reset(adev, job, &need_full_reset);
	if (r) {
		/* TODO Should we stop? */
		DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
			  r, adev->ddev->unique);
		adev->asic_reset_res = r;
	}

retry:	/* Rest of adevs pre asic reset from XGMI hive. */
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {

		if (tmp_adev == adev)
			continue;

		r = amdgpu_device_pre_asic_reset(tmp_adev,
						 NULL,
						 &need_full_reset);
		/* TODO Should we stop? */
		if (r) {
			DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
				  r, tmp_adev->ddev->unique);
			tmp_adev->asic_reset_res = r;
		}
	}

	/* Actual ASIC resets if needed. */
	/* TODO Implement XGMI hive reset logic for SRIOV */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_device_reset_sriov(adev, !job);
		if (r)
			adev->asic_reset_res = r;
	} else {
		r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
		if (r == -EAGAIN)
			goto retry;
	}

skip_hw_reset:

	/* Post ASIC reset for all devs. */
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = tmp_adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			/* No point in resubmitting jobs if we didn't do a HW reset */
			if (!tmp_adev->asic_reset_res && !job_signaled)
				drm_sched_resubmit_jobs(&ring->sched);

			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
		}

		if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled)
			drm_helper_resume_force_mode(tmp_adev->ddev);

		tmp_adev->asic_reset_res = 0;

		if (r) {
			/* bad news, how to tell it to userspace? */
			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
		} else {
			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
		}
	}

skip_sched_resume:
	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
		/* unlock kfd: SRIOV would do it separately */
		if (!(in_ras_intr && !use_baco) && !amdgpu_sriov_vf(tmp_adev))
			amdgpu_amdkfd_post_reset(tmp_adev);
		amdgpu_device_unlock_adev(tmp_adev);
	}

	if (hive)
		mutex_unlock(&hive->reset_lock);

	if (r)
		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
	return r;
}
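
/*
 * amdgpu_device_gpu_recover() also works without a guilty job, e.g. for a
 * manually triggered reset: passing job = NULL skips the karma handling
 * above and (on SR-IOV) requests the FLR from the hypervisor. A hedged
 * sketch of such a caller; the debugfs-style hook shown is illustrative,
 * not a quote of the driver's debugfs code.
 */
#if 0
static int example_manual_recover(void *data, u64 *val)
{
	struct amdgpu_device *adev = data;

	/* no guilty job: full recovery path, hypervisor-initiated FLR on VFs */
	*val = amdgpu_device_gpu_recover(adev, NULL);
	return 0;
}
#endif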

/**
 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches and stores in the driver the PCIE capabilities (gen speed
 * and lanes) of the slot the device is in. Handles APUs and
 * virtualized environments where PCIE config space may not be available.
 */
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
	struct pci_dev *pdev;
	enum pci_bus_speed speed_cap, platform_speed_cap;
	enum pcie_link_width platform_link_width;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
		return;

	pcie_bandwidth_available(adev->pdev, NULL,
				 &platform_speed_cap, &platform_link_width);

	if (adev->pm.pcie_gen_mask == 0) {
		/* asic caps */
		pdev = adev->pdev;
		speed_cap = pcie_get_speed_cap(pdev);
		if (speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
		} else {
			if (speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
		/* platform caps */
		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
		} else {
			if (platform_speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;

		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
		} else {
			switch (platform_link_width) {
			case PCIE_LNK_X32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		}
	}
}
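
/*
 * The masks built above are cumulative: supporting gen N sets the CAIL bits
 * for gens 1..N, and the effective link gen is the intersection of the ASIC
 * and platform halves of the mask. A small sketch of reading the platform
 * half back out; the helper name is hypothetical and the block is
 * illustrative only.
 */
#if 0
static int example_max_platform_gen(struct amdgpu_device *adev)
{
	u32 mask = adev->pm.pcie_gen_mask;

	if (mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		return 4;
	if (mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		return 3;
	if (mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		return 2;
	return 1;
}
#endif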

int amdgpu_device_baco_enter(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (!amdgpu_device_supports_baco(adev->ddev))
		return -ENOTSUPP;

	if (ras && ras->supported)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	return amdgpu_dpm_baco_enter(adev);
}

int amdgpu_device_baco_exit(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	if (!amdgpu_device_supports_baco(adev->ddev))
		return -ENOTSUPP;

	ret = amdgpu_dpm_baco_exit(adev);
	if (ret)
		return ret;

	if (ras && ras->supported)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	return 0;
}
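
/*
 * amdgpu_device_baco_enter()/amdgpu_device_baco_exit() are meant to be
 * called as a pair around the low-power window, e.g. from runtime-PM
 * suspend/resume hooks. A hedged sketch of that pairing; the hook below is
 * illustrative and is not the driver's actual runtime-PM implementation.
 */
#if 0
static int example_runtime_idle_cycle(struct drm_device *dev)
{
	int r;

	r = amdgpu_device_baco_enter(dev);
	if (r)
		return r;	/* -ENOTSUPP if the ASIC has no BACO */

	/* ... device sits in BACO until a wake event ... */

	return amdgpu_device_baco_exit(dev);
}
#endif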