/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/devcoredump.h>
#include <generated/utsrelease.h>
#include <linux/pci-p2pdma.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000
#define AMDGPU_MAX_RETRY_LIMIT		2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)

const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGAM",
	"VEGA10",
	"VEGA12",
	"VEGA20",
	"RAVEN",
	"ARCTURUS",
	"RENOIR",
	"ALDEBARAN",
"NAVI10", 120 "CYAN_SKILLFISH", 121 "NAVI14", 122 "NAVI12", 123 "SIENNA_CICHLID", 124 "NAVY_FLOUNDER", 125 "VANGOGH", 126 "DIMGREY_CAVEFISH", 127 "BEIGE_GOBY", 128 "YELLOW_CARP", 129 "IP DISCOVERY", 130 "LAST", 131 }; 132 133 /** 134 * DOC: pcie_replay_count 135 * 136 * The amdgpu driver provides a sysfs API for reporting the total number 137 * of PCIe replays (NAKs) 138 * The file pcie_replay_count is used for this and returns the total 139 * number of replays as a sum of the NAKs generated and NAKs received 140 */ 141 142 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev, 143 struct device_attribute *attr, char *buf) 144 { 145 struct drm_device *ddev = dev_get_drvdata(dev); 146 struct amdgpu_device *adev = drm_to_adev(ddev); 147 uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev); 148 149 return sysfs_emit(buf, "%llu\n", cnt); 150 } 151 152 static DEVICE_ATTR(pcie_replay_count, S_IRUGO, 153 amdgpu_device_get_pcie_replay_count, NULL); 154 155 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev); 156 157 /** 158 * DOC: product_name 159 * 160 * The amdgpu driver provides a sysfs API for reporting the product name 161 * for the device 162 * The file serial_number is used for this and returns the product name 163 * as returned from the FRU. 164 * NOTE: This is only available for certain server cards 165 */ 166 167 static ssize_t amdgpu_device_get_product_name(struct device *dev, 168 struct device_attribute *attr, char *buf) 169 { 170 struct drm_device *ddev = dev_get_drvdata(dev); 171 struct amdgpu_device *adev = drm_to_adev(ddev); 172 173 return sysfs_emit(buf, "%s\n", adev->product_name); 174 } 175 176 static DEVICE_ATTR(product_name, S_IRUGO, 177 amdgpu_device_get_product_name, NULL); 178 179 /** 180 * DOC: product_number 181 * 182 * The amdgpu driver provides a sysfs API for reporting the part number 183 * for the device 184 * The file serial_number is used for this and returns the part number 185 * as returned from the FRU. 186 * NOTE: This is only available for certain server cards 187 */ 188 189 static ssize_t amdgpu_device_get_product_number(struct device *dev, 190 struct device_attribute *attr, char *buf) 191 { 192 struct drm_device *ddev = dev_get_drvdata(dev); 193 struct amdgpu_device *adev = drm_to_adev(ddev); 194 195 return sysfs_emit(buf, "%s\n", adev->product_number); 196 } 197 198 static DEVICE_ATTR(product_number, S_IRUGO, 199 amdgpu_device_get_product_number, NULL); 200 201 /** 202 * DOC: serial_number 203 * 204 * The amdgpu driver provides a sysfs API for reporting the serial number 205 * for the device 206 * The file serial_number is used for this and returns the serial number 207 * as returned from the FRU. 208 * NOTE: This is only available for certain server cards 209 */ 210 211 static ssize_t amdgpu_device_get_serial_number(struct device *dev, 212 struct device_attribute *attr, char *buf) 213 { 214 struct drm_device *ddev = dev_get_drvdata(dev); 215 struct amdgpu_device *adev = drm_to_adev(ddev); 216 217 return sysfs_emit(buf, "%s\n", adev->serial); 218 } 219 220 static DEVICE_ATTR(serial_number, S_IRUGO, 221 amdgpu_device_get_serial_number, NULL); 222 223 /** 224 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control 225 * 226 * @dev: drm_device pointer 227 * 228 * Returns true if the device is a dGPU with ATPX power control, 229 * otherwise return false. 
/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
		return true;
	return false;
}

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (adev->has_pr3 ||
	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
		return true;
	return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise return false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	return amdgpu_asic_supports_baco(adev);
}

/**
 * amdgpu_device_supports_smart_shift - Is the device dGPU with
 * smart shift support
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
 */
bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
{
	return (amdgpu_device_supports_boco(dev) &&
		amdgpu_acpi_is_power_shift_control_supported());
}

/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size, sizeof(@buf) must be >= @size
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
			     void *buf, size_t size, bool write)
{
	unsigned long flags;
	uint32_t hi = ~0, tmp = 0;
	uint32_t *data = buf;
	uint64_t last;
	int idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;

	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));

	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
	for (last = pos + size; pos < last; pos += 4) {
		tmp = pos >> 31;

		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
		if (tmp != hi) {
			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
			hi = tmp;
		}
		if (write)
			WREG32_NO_KIQ(mmMM_DATA, *data++);
		else
			*data++ = RREG32_NO_KIQ(mmMM_DATA);
	}

	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	drm_dev_exit(idx);
}

/**
 * amdgpu_device_aper_access - access vram by vram aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size, sizeof(@buf) must be >= @size
 * @write: true - write to vram, otherwise - read from vram
 *
 * The return value means how many bytes have been transferred.
 */
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
				 void *buf, size_t size, bool write)
{
#ifdef CONFIG_64BIT
	void __iomem *addr;
	size_t count = 0;
	uint64_t last;

	if (!adev->mman.aper_base_kaddr)
		return 0;

	last = min(pos + size, adev->gmc.visible_vram_size);
	if (last > pos) {
		addr = adev->mman.aper_base_kaddr + pos;
		count = last - pos;

		if (write) {
			memcpy_toio(addr, buf, count);
			mb();
			amdgpu_device_flush_hdp(adev, NULL);
		} else {
			amdgpu_device_invalidate_hdp(adev, NULL);
			mb();
			memcpy_fromio(buf, addr, count);
		}

	}

	return count;
#else
	return 0;
#endif
}

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size, sizeof(@buf) must be >= @size
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       void *buf, size_t size, bool write)
{
	size_t count;

	/* try using the VRAM aperture to access VRAM first */
	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
	size -= count;
	if (size) {
		/* use MM_INDEX/MM_DATA to access the rest of VRAM */
		pos += count;
		buf += count;
		amdgpu_device_mm_access(adev, pos, buf, size, write);
	}
}

/*
 * register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
	if (adev->no_hw_access)
		return true;

#ifdef CONFIG_LOCKDEP
	/*
	 * This is a bit complicated to understand, so worth a comment. What we assert
	 * here is that the GPU reset is not running on another thread in parallel.
	 *
	 * For this we trylock the read side of the reset semaphore, if that succeeds
	 * we know that the reset is not running in parallel.
	 *
	 * If the trylock fails we assert that we are either already holding the read
	 * side of the lock or are the reset thread itself and hold the write side of
	 * the lock.
	 */
	if (in_task()) {
		if (down_read_trylock(&adev->reset_domain->sem))
			up_read(&adev->reset_domain->sem);
		else
			lockdep_assert_held(&adev->reset_domain->sem);
	}
#endif
	return false;
}
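/*
 * Illustrative sketch (assumptions: the caller holds a valid adev and the
 * VRAM offset and size are 4-byte aligned, as amdgpu_device_mm_access()
 * requires).  It shows how the helpers above are meant to be combined: the
 * CPU-visible aperture is tried first, and the MM_INDEX/MM_DATA window
 * covers whatever falls outside of it.
 *
 *	static void example_dump_vram(struct amdgpu_device *adev, loff_t pos)
 *	{
 *		u32 data[4] = {};
 *
 *		amdgpu_device_vram_access(adev, pos, data, sizeof(data), false);
 *		dev_info(adev->dev, "VRAM@%llx: %08x %08x %08x %08x\n",
 *			 (unsigned long long)pos,
 *			 data[0], data[1], data[2], data[3]);
 *	}
 */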
/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t acc_flags)
{
	uint32_t ret;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			ret = amdgpu_kiq_rreg(adev, reg);
			up_read(&adev->reset_domain->sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

	return ret;
}

/*
 * MMIO register read with bytes helper functions
 * @offset: byte offset from MMIO start
 *
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (offset < adev->rmmio_size)
		return (readb(adev->rmmio + offset));
	BUG();
}

/*
 * MMIO register write with bytes helper functions
 * @offset: byte offset from MMIO start
 * @value: the value to be written to the register
 *
 */
/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}

/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
			uint32_t acc_flags)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			amdgpu_kiq_wreg(adev, reg, v);
			up_read(&adev->reset_domain->sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		adev->pcie_wreg(adev, reg * 4, v);
	}

	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}

/**
 * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
 *
 * @adev: amdgpu_device pointer
 * @reg: mmio/rlc register
 * @v: value to write
 *
 * this function is invoked only for the debugfs register access
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
			     uint32_t reg, uint32_t v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (amdgpu_sriov_fullaccess(adev) &&
	    adev->gfx.rlc.funcs &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
			return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
	} else if ((reg * 4) >= adev->rmmio_size) {
		adev->pcie_wreg(adev, reg * 4, v);
	} else {
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
				u32 pcie_index, u32 pcie_data,
				u32 reg_addr)
{
	unsigned long flags;
	u32 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
				  u32 pcie_index, u32 pcie_data,
				  u32 reg_addr)
{
	unsigned long flags;
	u64 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	r |= ((u64)readl(pcie_data_offset) << 32);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_wreg - write an indirect register address
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
				 u32 pcie_index, u32 pcie_data,
				 u32 reg_addr, u32 reg_data)
{
	unsigned long flags;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
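/*
 * Illustrative sketch (the register offsets below are hypothetical): ASIC
 * files typically wrap the indirect helpers above into the adev->pcie_rreg /
 * adev->pcie_wreg callbacks by passing the MMIO offsets of their PCIE index
 * and data registers.
 *
 *	#define EXAMPLE_PCIE_INDEX	0x38	// hypothetical offset
 *	#define EXAMPLE_PCIE_DATA	0x3c	// hypothetical offset
 *
 *	static u32 example_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 *	{
 *		return amdgpu_device_indirect_rreg(adev, EXAMPLE_PCIE_INDEX,
 *						   EXAMPLE_PCIE_DATA, reg);
 *	}
 *
 *	static void example_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 *	{
 *		amdgpu_device_indirect_wreg(adev, EXAMPLE_PCIE_INDEX,
 *					    EXAMPLE_PCIE_DATA, reg, v);
 *	}
 */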
/**
 * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
				   u32 pcie_index, u32 pcie_data,
				   u32 reg_addr, u64 reg_data)
{
	unsigned long flags;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
	amdgpu_asic_pre_asic_init(adev);

	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
		return amdgpu_atomfirmware_asic_init(adev, true);
	else
		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
}

/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			if (adev->family >= AMDGPU_FAMILY_AI)
				tmp |= (or_mask & and_mask);
			else
				tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
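/*
 * Illustrative sketch (hypothetical offsets and masks): golden register
 * lists are flat arrays of {offset, and_mask, or_mask} triplets, which is
 * why amdgpu_device_program_register_sequence() rejects sizes that are not
 * a multiple of 3.  An and_mask of 0xffffffff means "overwrite with
 * or_mask"; anything else is a read-modify-write of the masked bits.
 *
 *	static const u32 example_golden_settings[] = {
 *		// offset   and_mask     or_mask
 *		0x2208,     0xffffffff,  0x00000000,	// full overwrite
 *		0x2200,     0x0000000f,  0x00000003,	// read-modify-write
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *						ARRAY_SIZE(example_golden_settings));
 */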
/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
	return pci_reset_function(adev->pdev);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{

	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	amdgpu_asic_init_doorbell_index(adev);

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	if (adev->enable_mes) {
		adev->doorbell.num_doorbells =
			adev->doorbell.size / sizeof(u32);
	} else {
		adev->doorbell.num_doorbells =
			min_t(u32, adev->doorbell.size / sizeof(u32),
			      adev->doorbell_index.max_assignment+1);
		if (adev->doorbell.num_doorbells == 0)
			return -EINVAL;

		/* For Vega, reserve and map two pages on doorbell BAR since SDMA
		 * paging queue doorbell use the second page. The
		 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
		 * doorbells are in the first page. So with paging queue enabled,
		 * the max num_doorbells should add 1 page (0x400 in dwords).
		 */
		if (adev->asic_type >= CHIP_VEGA10)
			adev->doorbell.num_doorbells += 0x400;
	}

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}



/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}
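/*
 * Illustrative sketch: a ring or IP block grabs a writeback slot, hands the
 * GPU-visible address to the hardware, reads the value back through the CPU
 * pointer and releases the slot again.  Error handling is trimmed and the
 * surrounding context is assumed.
 *
 *	u32 wb;
 *	u64 wb_gpu_addr;
 *	volatile u32 *wb_cpu;
 *
 *	if (amdgpu_device_wb_get(adev, &wb))
 *		return -EINVAL;
 *	wb_gpu_addr = adev->wb.gpu_addr + (wb * 4);	// GPU writes here
 *	wb_cpu = &adev->wb.wb[wb];			// CPU reads here
 *	...
 *	amdgpu_device_wb_free(adev, wb);
 */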
/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
	struct pci_bus *root;
	struct resource *res;
	unsigned i;
	u16 cmd;
	int r;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* skip if the bios has already enabled large BAR */
	if (adev->gmc.real_vram_size &&
	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Limit the BAR size to what is available */
	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
			rbar_size);

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw needs post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or post is needed if hw reset is performed.
 * Returns true if post is needed or false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* For FIJI: In the whole-GPU pass-through virtualization case, after a
		 * VM reboot some old SMC firmware still needs the driver to do a vPost,
		 * otherwise the GPU hangs. SMC firmware versions above 22.15 don't have
		 * this flaw, so we force a vPost for SMC versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	/* Don't post if we need to reset whole hive on init */
	if (adev->gmc.xgmi.pending_reset)
		return false;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

/**
 * amdgpu_device_should_use_aspm - check if the device should program ASPM
 *
 * @adev: amdgpu_device pointer
 *
 * Confirm whether the module parameter and pcie bridge agree that ASPM should
 * be set for this device.
 *
 * Returns true if it should be used or false if not.
 */
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
{
	switch (amdgpu_aspm) {
	case -1:
		break;
	case 0:
		return false;
	case 1:
		return true;
	default:
		return false;
	}
	return pcie_aspm_enabled(adev->pdev);
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @pdev: PCI device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
		bool state)
{
	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines number of bits in page table versus page directory,
 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 * page table and the remaining bits are in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}

/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
	struct sysinfo si;
	bool is_os_64 = (sizeof(void *) == 8);
	uint64_t total_memory;
	uint64_t dram_size_seven_GB = 0x1B8000000;
	uint64_t dram_size_three_GB = 0xB8000000;

	if (amdgpu_smu_memory_pool_size == 0)
		return;

	if (!is_os_64) {
		DRM_WARN("Not 64-bit OS, feature not supported\n");
		goto def_value;
	}
	si_meminfo(&si);
	total_memory = (uint64_t)si.totalram * si.mem_unit;

	if ((amdgpu_smu_memory_pool_size == 1) ||
	    (amdgpu_smu_memory_pool_size == 2)) {
		if (total_memory < dram_size_three_GB)
			goto def_value1;
	} else if ((amdgpu_smu_memory_pool_size == 4) ||
		   (amdgpu_smu_memory_pool_size == 8)) {
		if (total_memory < dram_size_seven_GB)
			goto def_value1;
	} else {
		DRM_WARN("Smu memory pool size not supported\n");
		goto def_value;
	}
	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

	return;

def_value1:
	DRM_WARN("Not enough system memory\n");
def_value:
	adev->pm.smu_prv_buffer_size = 0;
}

static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
{
	if (!(adev->flags & AMD_IS_APU) ||
	    adev->asic_type < CHIP_RAVEN)
		return 0;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->pdev->device == 0x15dd)
			adev->apu_flags |= AMD_APU_IS_RAVEN;
		if (adev->pdev->device == 0x15d8)
			adev->apu_flags |= AMD_APU_IS_PICASSO;
		break;
	case CHIP_RENOIR:
		if ((adev->pdev->device == 0x1636) ||
		    (adev->pdev->device == 0x164c))
			adev->apu_flags |= AMD_APU_IS_RENOIR;
		else
			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
		break;
	case CHIP_VANGOGH:
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case CHIP_YELLOW_CARP:
		break;
	case CHIP_CYAN_SKILLFISH:
		if ((adev->pdev->device == 0x13FE) ||
		    (adev->pdev->device == 0x143F))
			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
		break;
	default:
		break;
	}

	return 0;
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	if (amdgpu_sched_hw_submission < 2) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = 2;
	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
	}

	if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
		dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
		amdgpu_reset_method = -1;
	}

	amdgpu_device_check_smu_prv_buffer_size(adev);

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	return 0;
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
					enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	int r;

	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		pci_set_power_state(pdev, PCI_D0);
		amdgpu_device_load_pci_state(pdev);
		r = pci_enable_device(pdev);
		if (r)
			DRM_WARN("pci_enable_device failed (%d)\n", r);
		amdgpu_device_resume(dev, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true);
		amdgpu_device_cache_pci_state(pdev);
		/* Shut down the device */
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return atomic_read(&dev->open_count) == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}
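/*
 * Illustrative sketch: gate GFX clocks on every matching IP block instance.
 * The helper walks all IP blocks of the requested type, so callers do not
 * need to know how many instances exist.
 *
 *	int r = amdgpu_device_ip_set_clockgating_state(adev,
 *						       AMD_IP_BLOCK_TYPE_GFX,
 *						       AMD_CG_STATE_GATE);
 *	if (r)
 *		dev_warn(adev->dev, "failed to gate GFX clocks (%d)\n", r);
 */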
/**
 * amdgpu_device_ip_set_powergating_state - set the PG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 *
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

/**
 * amdgpu_device_ip_get_clockgating_state - get the CG state
 *
 * @adev: amdgpu_device pointer
 * @flags: clockgating feature flags
 *
 * Walks the list of IPs on the device and updates the clockgating
 * flags for each IP.
 * Updates @flags with the feature flags for each hardware IP where
 * clockgating is enabled.
 */
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u64 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

/**
 * amdgpu_device_ip_wait_for_idle - wait for idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Waits for the requested hardware IP to be idle.
 * Returns 0 for success or a negative error code on failure.
 */
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;

}

/**
 * amdgpu_device_ip_is_idle - is the hardware IP idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
 */
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;

}

/**
 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 *
 * @adev: amdgpu_device pointer
 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
 */
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * return 0 if equal or greater
 * return 1 if smaller or the ip_block doesn't exist
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			 ((ip_block->version->major == major) &&
			  (ip_block->version->minor >= minor))))
		return 0;

	return 1;
}

/**
 * amdgpu_device_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	switch (ip_block_version->type) {
	case AMD_IP_BLOCK_TYPE_VCN:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
			return 0;
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
			return 0;
		break;
	default:
		break;
	}

	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
		 ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}
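/*
 * Illustrative sketch (example_common_ip_block is hypothetical): how an
 * ASIC-specific set_ip_blocks() implementation registers its IP blocks, and
 * how other code can gate a feature on an IP block version.
 *
 *	// in an asic-specific set_ip_blocks() implementation:
 *	r = amdgpu_device_ip_block_add(adev, &example_common_ip_block);
 *	if (r)
 *		return r;
 *
 *	// elsewhere: require GFX >= 9.1 for some feature
 *	if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       9, 1) == 0)
 *		feature_supported = true;
 */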
/**
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display. This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
 */
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		const char *pci_address_name = pci_name(adev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

/**
 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
 * the asic.
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[40];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	if (adev->mman.discovery_bin) {
		/*
		 * FIXME: The bounding box is still needed by Navi12, so
		 * temporarily read it from gpu_info firmware. Should be dropped
		 * when DAL no longer needs it.
1942 */ 1943 if (adev->asic_type != CHIP_NAVI12) 1944 return 0; 1945 } 1946 1947 switch (adev->asic_type) { 1948 default: 1949 return 0; 1950 case CHIP_VEGA10: 1951 chip_name = "vega10"; 1952 break; 1953 case CHIP_VEGA12: 1954 chip_name = "vega12"; 1955 break; 1956 case CHIP_RAVEN: 1957 if (adev->apu_flags & AMD_APU_IS_RAVEN2) 1958 chip_name = "raven2"; 1959 else if (adev->apu_flags & AMD_APU_IS_PICASSO) 1960 chip_name = "picasso"; 1961 else 1962 chip_name = "raven"; 1963 break; 1964 case CHIP_ARCTURUS: 1965 chip_name = "arcturus"; 1966 break; 1967 case CHIP_NAVI12: 1968 chip_name = "navi12"; 1969 break; 1970 } 1971 1972 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name); 1973 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev); 1974 if (err) { 1975 dev_err(adev->dev, 1976 "Failed to load gpu_info firmware \"%s\"\n", 1977 fw_name); 1978 goto out; 1979 } 1980 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw); 1981 if (err) { 1982 dev_err(adev->dev, 1983 "Failed to validate gpu_info firmware \"%s\"\n", 1984 fw_name); 1985 goto out; 1986 } 1987 1988 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data; 1989 amdgpu_ucode_print_gpu_info_hdr(&hdr->header); 1990 1991 switch (hdr->version_major) { 1992 case 1: 1993 { 1994 const struct gpu_info_firmware_v1_0 *gpu_info_fw = 1995 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data + 1996 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 1997 1998 /* 1999 * Should be droped when DAL no longer needs it. 2000 */ 2001 if (adev->asic_type == CHIP_NAVI12) 2002 goto parse_soc_bounding_box; 2003 2004 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se); 2005 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh); 2006 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se); 2007 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se); 2008 adev->gfx.config.max_texture_channel_caches = 2009 le32_to_cpu(gpu_info_fw->gc_num_tccs); 2010 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs); 2011 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds); 2012 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth); 2013 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth); 2014 adev->gfx.config.double_offchip_lds_buf = 2015 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer); 2016 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size); 2017 adev->gfx.cu_info.max_waves_per_simd = 2018 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd); 2019 adev->gfx.cu_info.max_scratch_slots_per_cu = 2020 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu); 2021 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size); 2022 if (hdr->version_minor >= 1) { 2023 const struct gpu_info_firmware_v1_1 *gpu_info_fw = 2024 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data + 2025 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 2026 adev->gfx.config.num_sc_per_sh = 2027 le32_to_cpu(gpu_info_fw->num_sc_per_sh); 2028 adev->gfx.config.num_packer_per_sc = 2029 le32_to_cpu(gpu_info_fw->num_packer_per_sc); 2030 } 2031 2032 parse_soc_bounding_box: 2033 /* 2034 * soc bounding box info is not integrated in disocovery table, 2035 * we always need to parse it from gpu info firmware if needed. 
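		 * Only the v1.2 layout of the gpu_info firmware carries the
		 * bounding box, which is why hdr->version_minor is checked
		 * before the cast below.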
2036 */ 2037 if (hdr->version_minor == 2) { 2038 const struct gpu_info_firmware_v1_2 *gpu_info_fw = 2039 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data + 2040 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 2041 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box; 2042 } 2043 break; 2044 } 2045 default: 2046 dev_err(adev->dev, 2047 "Unsupported gpu_info table %d\n", hdr->header.ucode_version); 2048 err = -EINVAL; 2049 goto out; 2050 } 2051 out: 2052 return err; 2053 } 2054 2055 /** 2056 * amdgpu_device_ip_early_init - run early init for hardware IPs 2057 * 2058 * @adev: amdgpu_device pointer 2059 * 2060 * Early initialization pass for hardware IPs. The hardware IPs that make 2061 * up each asic are discovered each IP's early_init callback is run. This 2062 * is the first stage in initializing the asic. 2063 * Returns 0 on success, negative error code on failure. 2064 */ 2065 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) 2066 { 2067 struct drm_device *dev = adev_to_drm(adev); 2068 struct pci_dev *parent; 2069 int i, r; 2070 2071 amdgpu_device_enable_virtual_display(adev); 2072 2073 if (amdgpu_sriov_vf(adev)) { 2074 r = amdgpu_virt_request_full_gpu(adev, true); 2075 if (r) 2076 return r; 2077 } 2078 2079 switch (adev->asic_type) { 2080 #ifdef CONFIG_DRM_AMDGPU_SI 2081 case CHIP_VERDE: 2082 case CHIP_TAHITI: 2083 case CHIP_PITCAIRN: 2084 case CHIP_OLAND: 2085 case CHIP_HAINAN: 2086 adev->family = AMDGPU_FAMILY_SI; 2087 r = si_set_ip_blocks(adev); 2088 if (r) 2089 return r; 2090 break; 2091 #endif 2092 #ifdef CONFIG_DRM_AMDGPU_CIK 2093 case CHIP_BONAIRE: 2094 case CHIP_HAWAII: 2095 case CHIP_KAVERI: 2096 case CHIP_KABINI: 2097 case CHIP_MULLINS: 2098 if (adev->flags & AMD_IS_APU) 2099 adev->family = AMDGPU_FAMILY_KV; 2100 else 2101 adev->family = AMDGPU_FAMILY_CI; 2102 2103 r = cik_set_ip_blocks(adev); 2104 if (r) 2105 return r; 2106 break; 2107 #endif 2108 case CHIP_TOPAZ: 2109 case CHIP_TONGA: 2110 case CHIP_FIJI: 2111 case CHIP_POLARIS10: 2112 case CHIP_POLARIS11: 2113 case CHIP_POLARIS12: 2114 case CHIP_VEGAM: 2115 case CHIP_CARRIZO: 2116 case CHIP_STONEY: 2117 if (adev->flags & AMD_IS_APU) 2118 adev->family = AMDGPU_FAMILY_CZ; 2119 else 2120 adev->family = AMDGPU_FAMILY_VI; 2121 2122 r = vi_set_ip_blocks(adev); 2123 if (r) 2124 return r; 2125 break; 2126 default: 2127 r = amdgpu_discovery_set_ip_blocks(adev); 2128 if (r) 2129 return r; 2130 break; 2131 } 2132 2133 if (amdgpu_has_atpx() && 2134 (amdgpu_is_atpx_hybrid() || 2135 amdgpu_has_atpx_dgpu_power_cntl()) && 2136 ((adev->flags & AMD_IS_APU) == 0) && 2137 !pci_is_thunderbolt_attached(to_pci_dev(dev->dev))) 2138 adev->flags |= AMD_IS_PX; 2139 2140 if (!(adev->flags & AMD_IS_APU)) { 2141 parent = pci_upstream_bridge(adev->pdev); 2142 adev->has_pr3 = parent ? 
pci_pr3_present(parent) : false; 2143 } 2144 2145 amdgpu_amdkfd_device_probe(adev); 2146 2147 adev->pm.pp_feature = amdgpu_pp_feature_mask; 2148 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS) 2149 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 2150 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID) 2151 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK; 2152 2153 for (i = 0; i < adev->num_ip_blocks; i++) { 2154 if ((amdgpu_ip_block_mask & (1 << i)) == 0) { 2155 DRM_ERROR("disabled ip block: %d <%s>\n", 2156 i, adev->ip_blocks[i].version->funcs->name); 2157 adev->ip_blocks[i].status.valid = false; 2158 } else { 2159 if (adev->ip_blocks[i].version->funcs->early_init) { 2160 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev); 2161 if (r == -ENOENT) { 2162 adev->ip_blocks[i].status.valid = false; 2163 } else if (r) { 2164 DRM_ERROR("early_init of IP block <%s> failed %d\n", 2165 adev->ip_blocks[i].version->funcs->name, r); 2166 return r; 2167 } else { 2168 adev->ip_blocks[i].status.valid = true; 2169 } 2170 } else { 2171 adev->ip_blocks[i].status.valid = true; 2172 } 2173 } 2174 /* get the vbios after the asic_funcs are set up */ 2175 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { 2176 r = amdgpu_device_parse_gpu_info_fw(adev); 2177 if (r) 2178 return r; 2179 2180 /* Read BIOS */ 2181 if (!amdgpu_get_bios(adev)) 2182 return -EINVAL; 2183 2184 r = amdgpu_atombios_init(adev); 2185 if (r) { 2186 dev_err(adev->dev, "amdgpu_atombios_init failed\n"); 2187 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); 2188 return r; 2189 } 2190 2191 /*get pf2vf msg info at it's earliest time*/ 2192 if (amdgpu_sriov_vf(adev)) 2193 amdgpu_virt_init_data_exchange(adev); 2194 2195 } 2196 } 2197 2198 adev->cg_flags &= amdgpu_cg_mask; 2199 adev->pg_flags &= amdgpu_pg_mask; 2200 2201 return 0; 2202 } 2203 2204 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev) 2205 { 2206 int i, r; 2207 2208 for (i = 0; i < adev->num_ip_blocks; i++) { 2209 if (!adev->ip_blocks[i].status.sw) 2210 continue; 2211 if (adev->ip_blocks[i].status.hw) 2212 continue; 2213 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 2214 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) || 2215 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { 2216 r = adev->ip_blocks[i].version->funcs->hw_init(adev); 2217 if (r) { 2218 DRM_ERROR("hw_init of IP block <%s> failed %d\n", 2219 adev->ip_blocks[i].version->funcs->name, r); 2220 return r; 2221 } 2222 adev->ip_blocks[i].status.hw = true; 2223 } 2224 } 2225 2226 return 0; 2227 } 2228 2229 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev) 2230 { 2231 int i, r; 2232 2233 for (i = 0; i < adev->num_ip_blocks; i++) { 2234 if (!adev->ip_blocks[i].status.sw) 2235 continue; 2236 if (adev->ip_blocks[i].status.hw) 2237 continue; 2238 r = adev->ip_blocks[i].version->funcs->hw_init(adev); 2239 if (r) { 2240 DRM_ERROR("hw_init of IP block <%s> failed %d\n", 2241 adev->ip_blocks[i].version->funcs->name, r); 2242 return r; 2243 } 2244 adev->ip_blocks[i].status.hw = true; 2245 } 2246 2247 return 0; 2248 } 2249 2250 static int amdgpu_device_fw_loading(struct amdgpu_device *adev) 2251 { 2252 int r = 0; 2253 int i; 2254 uint32_t smu_version; 2255 2256 if (adev->asic_type >= CHIP_VEGA10) { 2257 for (i = 0; i < adev->num_ip_blocks; i++) { 2258 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP) 2259 continue; 2260 2261 if 
(!adev->ip_blocks[i].status.sw) 2262 continue; 2263 2264 /* no need to do the fw loading again if already done*/ 2265 if (adev->ip_blocks[i].status.hw == true) 2266 break; 2267 2268 if (amdgpu_in_reset(adev) || adev->in_suspend) { 2269 r = adev->ip_blocks[i].version->funcs->resume(adev); 2270 if (r) { 2271 DRM_ERROR("resume of IP block <%s> failed %d\n", 2272 adev->ip_blocks[i].version->funcs->name, r); 2273 return r; 2274 } 2275 } else { 2276 r = adev->ip_blocks[i].version->funcs->hw_init(adev); 2277 if (r) { 2278 DRM_ERROR("hw_init of IP block <%s> failed %d\n", 2279 adev->ip_blocks[i].version->funcs->name, r); 2280 return r; 2281 } 2282 } 2283 2284 adev->ip_blocks[i].status.hw = true; 2285 break; 2286 } 2287 } 2288 2289 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA) 2290 r = amdgpu_pm_load_smu_firmware(adev, &smu_version); 2291 2292 return r; 2293 } 2294 2295 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev) 2296 { 2297 long timeout; 2298 int r, i; 2299 2300 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 2301 struct amdgpu_ring *ring = adev->rings[i]; 2302 2303 /* No need to setup the GPU scheduler for rings that don't need it */ 2304 if (!ring || ring->no_scheduler) 2305 continue; 2306 2307 switch (ring->funcs->type) { 2308 case AMDGPU_RING_TYPE_GFX: 2309 timeout = adev->gfx_timeout; 2310 break; 2311 case AMDGPU_RING_TYPE_COMPUTE: 2312 timeout = adev->compute_timeout; 2313 break; 2314 case AMDGPU_RING_TYPE_SDMA: 2315 timeout = adev->sdma_timeout; 2316 break; 2317 default: 2318 timeout = adev->video_timeout; 2319 break; 2320 } 2321 2322 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, 2323 ring->num_hw_submission, amdgpu_job_hang_limit, 2324 timeout, adev->reset_domain->wq, 2325 ring->sched_score, ring->name, 2326 adev->dev); 2327 if (r) { 2328 DRM_ERROR("Failed to create scheduler on ring %s.\n", 2329 ring->name); 2330 return r; 2331 } 2332 } 2333 2334 return 0; 2335 } 2336 2337 2338 /** 2339 * amdgpu_device_ip_init - run init for hardware IPs 2340 * 2341 * @adev: amdgpu_device pointer 2342 * 2343 * Main initialization pass for hardware IPs. The list of all the hardware 2344 * IPs that make up the asic is walked and the sw_init and hw_init callbacks 2345 * are run. sw_init initializes the software state associated with each IP 2346 * and hw_init initializes the hardware associated with each IP. 2347 * Returns 0 on success, negative error code on failure. 
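 *
 * Roughly, and as the body below shows: sw_init runs for every valid block,
 * with GMC additionally getting an early hw_init so that VRAM scratch,
 * writeback and the static CSA can be allocated; then the IB pool and ucode
 * BO are created, hw_init runs in two phases around firmware loading, RAS
 * recovery and the ring schedulers are brought up, and finally KFD and the
 * FRU product info are initialized.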
2348 */ 2349 static int amdgpu_device_ip_init(struct amdgpu_device *adev) 2350 { 2351 int i, r; 2352 2353 r = amdgpu_ras_init(adev); 2354 if (r) 2355 return r; 2356 2357 for (i = 0; i < adev->num_ip_blocks; i++) { 2358 if (!adev->ip_blocks[i].status.valid) 2359 continue; 2360 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev); 2361 if (r) { 2362 DRM_ERROR("sw_init of IP block <%s> failed %d\n", 2363 adev->ip_blocks[i].version->funcs->name, r); 2364 goto init_failed; 2365 } 2366 adev->ip_blocks[i].status.sw = true; 2367 2368 /* need to do gmc hw init early so we can allocate gpu mem */ 2369 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { 2370 /* Try to reserve bad pages early */ 2371 if (amdgpu_sriov_vf(adev)) 2372 amdgpu_virt_exchange_data(adev); 2373 2374 r = amdgpu_device_vram_scratch_init(adev); 2375 if (r) { 2376 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); 2377 goto init_failed; 2378 } 2379 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); 2380 if (r) { 2381 DRM_ERROR("hw_init %d failed %d\n", i, r); 2382 goto init_failed; 2383 } 2384 r = amdgpu_device_wb_init(adev); 2385 if (r) { 2386 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r); 2387 goto init_failed; 2388 } 2389 adev->ip_blocks[i].status.hw = true; 2390 2391 /* right after GMC hw init, we create CSA */ 2392 if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) { 2393 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj, 2394 AMDGPU_GEM_DOMAIN_VRAM, 2395 AMDGPU_CSA_SIZE); 2396 if (r) { 2397 DRM_ERROR("allocate CSA failed %d\n", r); 2398 goto init_failed; 2399 } 2400 } 2401 } 2402 } 2403 2404 if (amdgpu_sriov_vf(adev)) 2405 amdgpu_virt_init_data_exchange(adev); 2406 2407 r = amdgpu_ib_pool_init(adev); 2408 if (r) { 2409 dev_err(adev->dev, "IB initialization failed (%d).\n", r); 2410 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r); 2411 goto init_failed; 2412 } 2413 2414 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/ 2415 if (r) 2416 goto init_failed; 2417 2418 r = amdgpu_device_ip_hw_init_phase1(adev); 2419 if (r) 2420 goto init_failed; 2421 2422 r = amdgpu_device_fw_loading(adev); 2423 if (r) 2424 goto init_failed; 2425 2426 r = amdgpu_device_ip_hw_init_phase2(adev); 2427 if (r) 2428 goto init_failed; 2429 2430 /* 2431 * retired pages will be loaded from eeprom and reserved here, 2432 * it should be called after amdgpu_device_ip_hw_init_phase2 since 2433 * for some ASICs the RAS EEPROM code relies on SMU fully functioning 2434 * for I2C communication which only true at this point. 2435 * 2436 * amdgpu_ras_recovery_init may fail, but the upper only cares the 2437 * failure from bad gpu situation and stop amdgpu init process 2438 * accordingly. For other failed cases, it will still release all 2439 * the resource and print error message, rather than returning one 2440 * negative value to upper level. 
2441 * 2442 * Note: theoretically, this should be called before all vram allocations 2443 * to protect retired page from abusing 2444 */ 2445 r = amdgpu_ras_recovery_init(adev); 2446 if (r) 2447 goto init_failed; 2448 2449 /** 2450 * In case of XGMI grab extra reference for reset domain for this device 2451 */ 2452 if (adev->gmc.xgmi.num_physical_nodes > 1) { 2453 if (amdgpu_xgmi_add_device(adev) == 0) { 2454 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); 2455 2456 if (!hive->reset_domain || 2457 !amdgpu_reset_get_reset_domain(hive->reset_domain)) { 2458 r = -ENOENT; 2459 amdgpu_put_xgmi_hive(hive); 2460 goto init_failed; 2461 } 2462 2463 /* Drop the early temporary reset domain we created for device */ 2464 amdgpu_reset_put_reset_domain(adev->reset_domain); 2465 adev->reset_domain = hive->reset_domain; 2466 amdgpu_put_xgmi_hive(hive); 2467 } 2468 } 2469 2470 r = amdgpu_device_init_schedulers(adev); 2471 if (r) 2472 goto init_failed; 2473 2474 /* Don't init kfd if whole hive need to be reset during init */ 2475 if (!adev->gmc.xgmi.pending_reset) 2476 amdgpu_amdkfd_device_init(adev); 2477 2478 amdgpu_fru_get_product_info(adev); 2479 2480 init_failed: 2481 if (amdgpu_sriov_vf(adev)) 2482 amdgpu_virt_release_full_gpu(adev, true); 2483 2484 return r; 2485 } 2486 2487 /** 2488 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer 2489 * 2490 * @adev: amdgpu_device pointer 2491 * 2492 * Writes a reset magic value to the gart pointer in VRAM. The driver calls 2493 * this function before a GPU reset. If the value is retained after a 2494 * GPU reset, VRAM has not been lost. Some GPU resets may destry VRAM contents. 2495 */ 2496 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev) 2497 { 2498 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM); 2499 } 2500 2501 /** 2502 * amdgpu_device_check_vram_lost - check if vram is valid 2503 * 2504 * @adev: amdgpu_device pointer 2505 * 2506 * Checks the reset magic value written to the gart pointer in VRAM. 2507 * The driver calls this after a GPU reset to see if the contents of 2508 * VRAM is lost or now. 2509 * returns true if vram is lost, false if not. 2510 */ 2511 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev) 2512 { 2513 if (memcmp(adev->gart.ptr, adev->reset_magic, 2514 AMDGPU_RESET_MAGIC_NUM)) 2515 return true; 2516 2517 if (!amdgpu_in_reset(adev)) 2518 return false; 2519 2520 /* 2521 * For all ASICs with baco/mode1 reset, the VRAM is 2522 * always assumed to be lost. 2523 */ 2524 switch (amdgpu_asic_reset_method(adev)) { 2525 case AMD_RESET_METHOD_BACO: 2526 case AMD_RESET_METHOD_MODE1: 2527 return true; 2528 default: 2529 return false; 2530 } 2531 } 2532 2533 /** 2534 * amdgpu_device_set_cg_state - set clockgating for amdgpu device 2535 * 2536 * @adev: amdgpu_device pointer 2537 * @state: clockgating state (gate or ungate) 2538 * 2539 * The list of all the hardware IPs that make up the asic is walked and the 2540 * set_clockgating_state callbacks are run. 2541 * Late initialization pass enabling clockgating for hardware IPs. 2542 * Fini or suspend, pass disabling clockgating for hardware IPs. 2543 * Returns 0 on success, negative error code on failure. 2544 */ 2545 2546 int amdgpu_device_set_cg_state(struct amdgpu_device *adev, 2547 enum amd_clockgating_state state) 2548 { 2549 int i, j, r; 2550 2551 if (amdgpu_emu_mode == 1) 2552 return 0; 2553 2554 for (j = 0; j < adev->num_ip_blocks; j++) { 2555 i = state == AMD_CG_STATE_GATE ? 
j : adev->num_ip_blocks - j - 1; 2556 if (!adev->ip_blocks[i].status.late_initialized) 2557 continue; 2558 /* skip CG for GFX on S0ix */ 2559 if (adev->in_s0ix && 2560 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX) 2561 continue; 2562 /* skip CG for VCE/UVD, it's handled specially */ 2563 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && 2564 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && 2565 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && 2566 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && 2567 adev->ip_blocks[i].version->funcs->set_clockgating_state) { 2568 /* enable clockgating to save power */ 2569 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, 2570 state); 2571 if (r) { 2572 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", 2573 adev->ip_blocks[i].version->funcs->name, r); 2574 return r; 2575 } 2576 } 2577 } 2578 2579 return 0; 2580 } 2581 2582 int amdgpu_device_set_pg_state(struct amdgpu_device *adev, 2583 enum amd_powergating_state state) 2584 { 2585 int i, j, r; 2586 2587 if (amdgpu_emu_mode == 1) 2588 return 0; 2589 2590 for (j = 0; j < adev->num_ip_blocks; j++) { 2591 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; 2592 if (!adev->ip_blocks[i].status.late_initialized) 2593 continue; 2594 /* skip PG for GFX on S0ix */ 2595 if (adev->in_s0ix && 2596 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX) 2597 continue; 2598 /* skip CG for VCE/UVD, it's handled specially */ 2599 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && 2600 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && 2601 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && 2602 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && 2603 adev->ip_blocks[i].version->funcs->set_powergating_state) { 2604 /* enable powergating to save power */ 2605 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev, 2606 state); 2607 if (r) { 2608 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n", 2609 adev->ip_blocks[i].version->funcs->name, r); 2610 return r; 2611 } 2612 } 2613 } 2614 return 0; 2615 } 2616 2617 static int amdgpu_device_enable_mgpu_fan_boost(void) 2618 { 2619 struct amdgpu_gpu_instance *gpu_ins; 2620 struct amdgpu_device *adev; 2621 int i, ret = 0; 2622 2623 mutex_lock(&mgpu_info.mutex); 2624 2625 /* 2626 * MGPU fan boost feature should be enabled 2627 * only when there are two or more dGPUs in 2628 * the system 2629 */ 2630 if (mgpu_info.num_dgpu < 2) 2631 goto out; 2632 2633 for (i = 0; i < mgpu_info.num_dgpu; i++) { 2634 gpu_ins = &(mgpu_info.gpu_ins[i]); 2635 adev = gpu_ins->adev; 2636 if (!(adev->flags & AMD_IS_APU) && 2637 !gpu_ins->mgpu_fan_enabled) { 2638 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev); 2639 if (ret) 2640 break; 2641 2642 gpu_ins->mgpu_fan_enabled = 1; 2643 } 2644 } 2645 2646 out: 2647 mutex_unlock(&mgpu_info.mutex); 2648 2649 return ret; 2650 } 2651 2652 /** 2653 * amdgpu_device_ip_late_init - run late init for hardware IPs 2654 * 2655 * @adev: amdgpu_device pointer 2656 * 2657 * Late initialization pass for hardware IPs. The list of all the hardware 2658 * IPs that make up the asic is walked and the late_init callbacks are run. 2659 * late_init covers any special initialization that an IP requires 2660 * after all of the have been initialized or something that needs to happen 2661 * late in the init process. 2662 * Returns 0 on success, negative error code on failure. 
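 *
 * Besides the per-IP late_init callbacks this is also where RAS late init
 * runs, clockgating and powergating are enabled, the reset magic is written
 * and, on multi-GPU systems, fan boost and the XGMI p-state are configured.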
 */
static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
		adev->ip_blocks[i].status.late_initialized = true;
	}

	r = amdgpu_ras_late_init(adev);
	if (r) {
		DRM_ERROR("amdgpu_ras_late_init failed %d", r);
		return r;
	}

	amdgpu_ras_set_error_query_ready(adev, true);

	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);

	amdgpu_device_fill_reset_magic(adev);

	r = amdgpu_device_enable_mgpu_fan_boost();
	if (r)
		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);

	/* For passthrough configuration on arcturus and aldebaran, enable special handling for SBR */
	if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
					 adev->asic_type == CHIP_ALDEBARAN))
		amdgpu_dpm_handle_passthrough_sbr(adev, true);

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		mutex_lock(&mgpu_info.mutex);

		/*
		 * Reset the device p-state to low, as it was booted with high.
		 *
		 * This should be performed only after all devices from the
		 * same hive get initialized.
		 *
		 * However, the number of devices in a hive is not known in
		 * advance; it is only counted up one by one as each device
		 * initializes.
		 *
		 * So, we wait until all XGMI interlinked devices are
		 * initialized. This may bring some delays as those devices
		 * may come from different hives. But that should be OK.
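		 * Hence the check below that mgpu_info.num_dgpu has reached
		 * adev->gmc.xgmi.num_physical_nodes before the p-state of any
		 * device in the hive is touched.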
2720 */ 2721 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) { 2722 for (i = 0; i < mgpu_info.num_gpu; i++) { 2723 gpu_instance = &(mgpu_info.gpu_ins[i]); 2724 if (gpu_instance->adev->flags & AMD_IS_APU) 2725 continue; 2726 2727 r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 2728 AMDGPU_XGMI_PSTATE_MIN); 2729 if (r) { 2730 DRM_ERROR("pstate setting failed (%d).\n", r); 2731 break; 2732 } 2733 } 2734 } 2735 2736 mutex_unlock(&mgpu_info.mutex); 2737 } 2738 2739 return 0; 2740 } 2741 2742 /** 2743 * amdgpu_device_smu_fini_early - smu hw_fini wrapper 2744 * 2745 * @adev: amdgpu_device pointer 2746 * 2747 * For ASICs need to disable SMC first 2748 */ 2749 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev) 2750 { 2751 int i, r; 2752 2753 if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0)) 2754 return; 2755 2756 for (i = 0; i < adev->num_ip_blocks; i++) { 2757 if (!adev->ip_blocks[i].status.hw) 2758 continue; 2759 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { 2760 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); 2761 /* XXX handle errors */ 2762 if (r) { 2763 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", 2764 adev->ip_blocks[i].version->funcs->name, r); 2765 } 2766 adev->ip_blocks[i].status.hw = false; 2767 break; 2768 } 2769 } 2770 } 2771 2772 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) 2773 { 2774 int i, r; 2775 2776 for (i = 0; i < adev->num_ip_blocks; i++) { 2777 if (!adev->ip_blocks[i].version->funcs->early_fini) 2778 continue; 2779 2780 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev); 2781 if (r) { 2782 DRM_DEBUG("early_fini of IP block <%s> failed %d\n", 2783 adev->ip_blocks[i].version->funcs->name, r); 2784 } 2785 } 2786 2787 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); 2788 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); 2789 2790 amdgpu_amdkfd_suspend(adev, false); 2791 2792 /* Workaroud for ASICs need to disable SMC first */ 2793 amdgpu_device_smu_fini_early(adev); 2794 2795 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2796 if (!adev->ip_blocks[i].status.hw) 2797 continue; 2798 2799 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); 2800 /* XXX handle errors */ 2801 if (r) { 2802 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", 2803 adev->ip_blocks[i].version->funcs->name, r); 2804 } 2805 2806 adev->ip_blocks[i].status.hw = false; 2807 } 2808 2809 if (amdgpu_sriov_vf(adev)) { 2810 if (amdgpu_virt_release_full_gpu(adev, false)) 2811 DRM_ERROR("failed to release exclusive mode on fini\n"); 2812 } 2813 2814 return 0; 2815 } 2816 2817 /** 2818 * amdgpu_device_ip_fini - run fini for hardware IPs 2819 * 2820 * @adev: amdgpu_device pointer 2821 * 2822 * Main teardown pass for hardware IPs. The list of all the hardware 2823 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks 2824 * are run. hw_fini tears down the hardware associated with each IP 2825 * and sw_fini tears down any software state associated with each IP. 2826 * Returns 0 on success, negative error code on failure. 
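 *
 * Teardown walks the IP list in reverse of the init order; when the GMC
 * block is reached, the ucode BO, static CSA, writeback, VRAM scratch and
 * IB pool allocated during init are released as well.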
2827 */ 2828 static int amdgpu_device_ip_fini(struct amdgpu_device *adev) 2829 { 2830 int i, r; 2831 2832 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done) 2833 amdgpu_virt_release_ras_err_handler_data(adev); 2834 2835 if (adev->gmc.xgmi.num_physical_nodes > 1) 2836 amdgpu_xgmi_remove_device(adev); 2837 2838 amdgpu_amdkfd_device_fini_sw(adev); 2839 2840 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2841 if (!adev->ip_blocks[i].status.sw) 2842 continue; 2843 2844 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { 2845 amdgpu_ucode_free_bo(adev); 2846 amdgpu_free_static_csa(&adev->virt.csa_obj); 2847 amdgpu_device_wb_fini(adev); 2848 amdgpu_device_vram_scratch_fini(adev); 2849 amdgpu_ib_pool_fini(adev); 2850 } 2851 2852 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev); 2853 /* XXX handle errors */ 2854 if (r) { 2855 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", 2856 adev->ip_blocks[i].version->funcs->name, r); 2857 } 2858 adev->ip_blocks[i].status.sw = false; 2859 adev->ip_blocks[i].status.valid = false; 2860 } 2861 2862 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2863 if (!adev->ip_blocks[i].status.late_initialized) 2864 continue; 2865 if (adev->ip_blocks[i].version->funcs->late_fini) 2866 adev->ip_blocks[i].version->funcs->late_fini((void *)adev); 2867 adev->ip_blocks[i].status.late_initialized = false; 2868 } 2869 2870 amdgpu_ras_fini(adev); 2871 2872 return 0; 2873 } 2874 2875 /** 2876 * amdgpu_device_delayed_init_work_handler - work handler for IB tests 2877 * 2878 * @work: work_struct. 2879 */ 2880 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work) 2881 { 2882 struct amdgpu_device *adev = 2883 container_of(work, struct amdgpu_device, delayed_init_work.work); 2884 int r; 2885 2886 r = amdgpu_ib_ring_tests(adev); 2887 if (r) 2888 DRM_ERROR("ib ring test failed (%d).\n", r); 2889 } 2890 2891 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work) 2892 { 2893 struct amdgpu_device *adev = 2894 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work); 2895 2896 WARN_ON_ONCE(adev->gfx.gfx_off_state); 2897 WARN_ON_ONCE(adev->gfx.gfx_off_req_count); 2898 2899 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true)) 2900 adev->gfx.gfx_off_state = true; 2901 } 2902 2903 /** 2904 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1) 2905 * 2906 * @adev: amdgpu_device pointer 2907 * 2908 * Main suspend function for hardware IPs. The list of all the hardware 2909 * IPs that make up the asic is walked, clockgating is disabled and the 2910 * suspend callbacks are run. suspend puts the hardware and software state 2911 * in each IP into a state suitable for suspend. 2912 * Returns 0 on success, negative error code on failure. 
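 *
 * Phase 1 only suspends the display (DCE) blocks; every other block is left
 * untouched here and handled later by amdgpu_device_ip_suspend_phase2().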
2913 */ 2914 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) 2915 { 2916 int i, r; 2917 2918 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); 2919 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); 2920 2921 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2922 if (!adev->ip_blocks[i].status.valid) 2923 continue; 2924 2925 /* displays are handled separately */ 2926 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE) 2927 continue; 2928 2929 /* XXX handle errors */ 2930 r = adev->ip_blocks[i].version->funcs->suspend(adev); 2931 /* XXX handle errors */ 2932 if (r) { 2933 DRM_ERROR("suspend of IP block <%s> failed %d\n", 2934 adev->ip_blocks[i].version->funcs->name, r); 2935 return r; 2936 } 2937 2938 adev->ip_blocks[i].status.hw = false; 2939 } 2940 2941 return 0; 2942 } 2943 2944 /** 2945 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2) 2946 * 2947 * @adev: amdgpu_device pointer 2948 * 2949 * Main suspend function for hardware IPs. The list of all the hardware 2950 * IPs that make up the asic is walked, clockgating is disabled and the 2951 * suspend callbacks are run. suspend puts the hardware and software state 2952 * in each IP into a state suitable for suspend. 2953 * Returns 0 on success, negative error code on failure. 2954 */ 2955 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) 2956 { 2957 int i, r; 2958 2959 if (adev->in_s0ix) 2960 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry); 2961 2962 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2963 if (!adev->ip_blocks[i].status.valid) 2964 continue; 2965 /* displays are handled in phase1 */ 2966 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) 2967 continue; 2968 /* PSP lost connection when err_event_athub occurs */ 2969 if (amdgpu_ras_intr_triggered() && 2970 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { 2971 adev->ip_blocks[i].status.hw = false; 2972 continue; 2973 } 2974 2975 /* skip unnecessary suspend if we do not initialize them yet */ 2976 if (adev->gmc.xgmi.pending_reset && 2977 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || 2978 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC || 2979 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 2980 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) { 2981 adev->ip_blocks[i].status.hw = false; 2982 continue; 2983 } 2984 2985 /* skip suspend of gfx and psp for S0ix 2986 * gfx is in gfxoff state, so on resume it will exit gfxoff just 2987 * like at runtime. PSP is also part of the always on hardware 2988 * so no need to suspend it. 
2989 */ 2990 if (adev->in_s0ix && 2991 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP || 2992 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)) 2993 continue; 2994 2995 /* XXX handle errors */ 2996 r = adev->ip_blocks[i].version->funcs->suspend(adev); 2997 /* XXX handle errors */ 2998 if (r) { 2999 DRM_ERROR("suspend of IP block <%s> failed %d\n", 3000 adev->ip_blocks[i].version->funcs->name, r); 3001 } 3002 adev->ip_blocks[i].status.hw = false; 3003 /* handle putting the SMC in the appropriate state */ 3004 if(!amdgpu_sriov_vf(adev)){ 3005 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { 3006 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state); 3007 if (r) { 3008 DRM_ERROR("SMC failed to set mp1 state %d, %d\n", 3009 adev->mp1_state, r); 3010 return r; 3011 } 3012 } 3013 } 3014 } 3015 3016 return 0; 3017 } 3018 3019 /** 3020 * amdgpu_device_ip_suspend - run suspend for hardware IPs 3021 * 3022 * @adev: amdgpu_device pointer 3023 * 3024 * Main suspend function for hardware IPs. The list of all the hardware 3025 * IPs that make up the asic is walked, clockgating is disabled and the 3026 * suspend callbacks are run. suspend puts the hardware and software state 3027 * in each IP into a state suitable for suspend. 3028 * Returns 0 on success, negative error code on failure. 3029 */ 3030 int amdgpu_device_ip_suspend(struct amdgpu_device *adev) 3031 { 3032 int r; 3033 3034 if (amdgpu_sriov_vf(adev)) { 3035 amdgpu_virt_fini_data_exchange(adev); 3036 amdgpu_virt_request_full_gpu(adev, false); 3037 } 3038 3039 r = amdgpu_device_ip_suspend_phase1(adev); 3040 if (r) 3041 return r; 3042 r = amdgpu_device_ip_suspend_phase2(adev); 3043 3044 if (amdgpu_sriov_vf(adev)) 3045 amdgpu_virt_release_full_gpu(adev, false); 3046 3047 return r; 3048 } 3049 3050 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) 3051 { 3052 int i, r; 3053 3054 static enum amd_ip_block_type ip_order[] = { 3055 AMD_IP_BLOCK_TYPE_GMC, 3056 AMD_IP_BLOCK_TYPE_COMMON, 3057 AMD_IP_BLOCK_TYPE_PSP, 3058 AMD_IP_BLOCK_TYPE_IH, 3059 }; 3060 3061 for (i = 0; i < adev->num_ip_blocks; i++) { 3062 int j; 3063 struct amdgpu_ip_block *block; 3064 3065 block = &adev->ip_blocks[i]; 3066 block->status.hw = false; 3067 3068 for (j = 0; j < ARRAY_SIZE(ip_order); j++) { 3069 3070 if (block->version->type != ip_order[j] || 3071 !block->status.valid) 3072 continue; 3073 3074 r = block->version->funcs->hw_init(adev); 3075 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); 3076 if (r) 3077 return r; 3078 block->status.hw = true; 3079 } 3080 } 3081 3082 return 0; 3083 } 3084 3085 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) 3086 { 3087 int i, r; 3088 3089 static enum amd_ip_block_type ip_order[] = { 3090 AMD_IP_BLOCK_TYPE_SMC, 3091 AMD_IP_BLOCK_TYPE_DCE, 3092 AMD_IP_BLOCK_TYPE_GFX, 3093 AMD_IP_BLOCK_TYPE_SDMA, 3094 AMD_IP_BLOCK_TYPE_UVD, 3095 AMD_IP_BLOCK_TYPE_VCE, 3096 AMD_IP_BLOCK_TYPE_VCN 3097 }; 3098 3099 for (i = 0; i < ARRAY_SIZE(ip_order); i++) { 3100 int j; 3101 struct amdgpu_ip_block *block; 3102 3103 for (j = 0; j < adev->num_ip_blocks; j++) { 3104 block = &adev->ip_blocks[j]; 3105 3106 if (block->version->type != ip_order[i] || 3107 !block->status.valid || 3108 block->status.hw) 3109 continue; 3110 3111 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) 3112 r = block->version->funcs->resume(adev); 3113 else 3114 r = block->version->funcs->hw_init(adev); 3115 3116 DRM_INFO("RE-INIT-late: %s %s\n", 
block->version->funcs->name, r?"failed":"succeeded"); 3117 if (r) 3118 return r; 3119 block->status.hw = true; 3120 } 3121 } 3122 3123 return 0; 3124 } 3125 3126 /** 3127 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs 3128 * 3129 * @adev: amdgpu_device pointer 3130 * 3131 * First resume function for hardware IPs. The list of all the hardware 3132 * IPs that make up the asic is walked and the resume callbacks are run for 3133 * COMMON, GMC, and IH. resume puts the hardware into a functional state 3134 * after a suspend and updates the software state as necessary. This 3135 * function is also used for restoring the GPU after a GPU reset. 3136 * Returns 0 on success, negative error code on failure. 3137 */ 3138 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev) 3139 { 3140 int i, r; 3141 3142 for (i = 0; i < adev->num_ip_blocks; i++) { 3143 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) 3144 continue; 3145 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 3146 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || 3147 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { 3148 3149 r = adev->ip_blocks[i].version->funcs->resume(adev); 3150 if (r) { 3151 DRM_ERROR("resume of IP block <%s> failed %d\n", 3152 adev->ip_blocks[i].version->funcs->name, r); 3153 return r; 3154 } 3155 adev->ip_blocks[i].status.hw = true; 3156 } 3157 } 3158 3159 return 0; 3160 } 3161 3162 /** 3163 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs 3164 * 3165 * @adev: amdgpu_device pointer 3166 * 3167 * First resume function for hardware IPs. The list of all the hardware 3168 * IPs that make up the asic is walked and the resume callbacks are run for 3169 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a 3170 * functional state after a suspend and updates the software state as 3171 * necessary. This function is also used for restoring the GPU after a GPU 3172 * reset. 3173 * Returns 0 on success, negative error code on failure. 3174 */ 3175 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) 3176 { 3177 int i, r; 3178 3179 for (i = 0; i < adev->num_ip_blocks; i++) { 3180 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) 3181 continue; 3182 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 3183 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || 3184 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || 3185 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) 3186 continue; 3187 r = adev->ip_blocks[i].version->funcs->resume(adev); 3188 if (r) { 3189 DRM_ERROR("resume of IP block <%s> failed %d\n", 3190 adev->ip_blocks[i].version->funcs->name, r); 3191 return r; 3192 } 3193 adev->ip_blocks[i].status.hw = true; 3194 } 3195 3196 return 0; 3197 } 3198 3199 /** 3200 * amdgpu_device_ip_resume - run resume for hardware IPs 3201 * 3202 * @adev: amdgpu_device pointer 3203 * 3204 * Main resume function for hardware IPs. The hardware IPs 3205 * are split into two resume functions because they are 3206 * are also used in in recovering from a GPU reset and some additional 3207 * steps need to be take between them. In this case (S3/S4) they are 3208 * run sequentially. 3209 * Returns 0 on success, negative error code on failure. 
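 *
 * Phase 1 resumes COMMON, GMC and IH, firmware loading happens in between,
 * and phase 2 resumes the remaining blocks (PSP excluded, since it is
 * handled by the firmware loading step).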
3210 */ 3211 static int amdgpu_device_ip_resume(struct amdgpu_device *adev) 3212 { 3213 int r; 3214 3215 r = amdgpu_amdkfd_resume_iommu(adev); 3216 if (r) 3217 return r; 3218 3219 r = amdgpu_device_ip_resume_phase1(adev); 3220 if (r) 3221 return r; 3222 3223 r = amdgpu_device_fw_loading(adev); 3224 if (r) 3225 return r; 3226 3227 r = amdgpu_device_ip_resume_phase2(adev); 3228 3229 return r; 3230 } 3231 3232 /** 3233 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV 3234 * 3235 * @adev: amdgpu_device pointer 3236 * 3237 * Query the VBIOS data tables to determine if the board supports SR-IOV. 3238 */ 3239 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev) 3240 { 3241 if (amdgpu_sriov_vf(adev)) { 3242 if (adev->is_atom_fw) { 3243 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev)) 3244 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; 3245 } else { 3246 if (amdgpu_atombios_has_gpu_virtualization_table(adev)) 3247 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; 3248 } 3249 3250 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)) 3251 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0); 3252 } 3253 } 3254 3255 /** 3256 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic 3257 * 3258 * @asic_type: AMD asic type 3259 * 3260 * Check if there is DC (new modesetting infrastructre) support for an asic. 3261 * returns true if DC has support, false if not. 3262 */ 3263 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) 3264 { 3265 switch (asic_type) { 3266 #ifdef CONFIG_DRM_AMDGPU_SI 3267 case CHIP_HAINAN: 3268 #endif 3269 case CHIP_TOPAZ: 3270 /* chips with no display hardware */ 3271 return false; 3272 #if defined(CONFIG_DRM_AMD_DC) 3273 case CHIP_TAHITI: 3274 case CHIP_PITCAIRN: 3275 case CHIP_VERDE: 3276 case CHIP_OLAND: 3277 /* 3278 * We have systems in the wild with these ASICs that require 3279 * LVDS and VGA support which is not supported with DC. 3280 * 3281 * Fallback to the non-DC driver here by default so as not to 3282 * cause regressions. 3283 */ 3284 #if defined(CONFIG_DRM_AMD_DC_SI) 3285 return amdgpu_dc > 0; 3286 #else 3287 return false; 3288 #endif 3289 case CHIP_BONAIRE: 3290 case CHIP_KAVERI: 3291 case CHIP_KABINI: 3292 case CHIP_MULLINS: 3293 /* 3294 * We have systems in the wild with these ASICs that require 3295 * VGA support which is not supported with DC. 3296 * 3297 * Fallback to the non-DC driver here by default so as not to 3298 * cause regressions. 
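	 * "amdgpu_dc > 0" below therefore enables DC on these chips only when
	 * it has been explicitly requested (e.g. via the dc module parameter),
	 * whereas the default case further down only requires that DC was not
	 * explicitly disabled.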
3299 */ 3300 return amdgpu_dc > 0; 3301 default: 3302 return amdgpu_dc != 0; 3303 #else 3304 default: 3305 if (amdgpu_dc > 0) 3306 DRM_INFO_ONCE("Display Core has been requested via kernel parameter " 3307 "but isn't supported by ASIC, ignoring\n"); 3308 return false; 3309 #endif 3310 } 3311 } 3312 3313 /** 3314 * amdgpu_device_has_dc_support - check if dc is supported 3315 * 3316 * @adev: amdgpu_device pointer 3317 * 3318 * Returns true for supported, false for not supported 3319 */ 3320 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev) 3321 { 3322 if (amdgpu_sriov_vf(adev) || 3323 adev->enable_virtual_display || 3324 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)) 3325 return false; 3326 3327 return amdgpu_device_asic_has_dc_support(adev->asic_type); 3328 } 3329 3330 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) 3331 { 3332 struct amdgpu_device *adev = 3333 container_of(__work, struct amdgpu_device, xgmi_reset_work); 3334 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); 3335 3336 /* It's a bug to not have a hive within this function */ 3337 if (WARN_ON(!hive)) 3338 return; 3339 3340 /* 3341 * Use task barrier to synchronize all xgmi reset works across the 3342 * hive. task_barrier_enter and task_barrier_exit will block 3343 * until all the threads running the xgmi reset works reach 3344 * those points. task_barrier_full will do both blocks. 3345 */ 3346 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { 3347 3348 task_barrier_enter(&hive->tb); 3349 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev)); 3350 3351 if (adev->asic_reset_res) 3352 goto fail; 3353 3354 task_barrier_exit(&hive->tb); 3355 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev)); 3356 3357 if (adev->asic_reset_res) 3358 goto fail; 3359 3360 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops && 3361 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count) 3362 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev); 3363 } else { 3364 3365 task_barrier_full(&hive->tb); 3366 adev->asic_reset_res = amdgpu_asic_reset(adev); 3367 } 3368 3369 fail: 3370 if (adev->asic_reset_res) 3371 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s", 3372 adev->asic_reset_res, adev_to_drm(adev)->unique); 3373 amdgpu_put_xgmi_hive(hive); 3374 } 3375 3376 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) 3377 { 3378 char *input = amdgpu_lockup_timeout; 3379 char *timeout_setting = NULL; 3380 int index = 0; 3381 long timeout; 3382 int ret = 0; 3383 3384 /* 3385 * By default timeout for non compute jobs is 10000 3386 * and 60000 for compute jobs. 3387 * In SR-IOV or passthrough mode, timeout for compute 3388 * jobs are 60000 by default. 3389 */ 3390 adev->gfx_timeout = msecs_to_jiffies(10000); 3391 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; 3392 if (amdgpu_sriov_vf(adev)) 3393 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ? 
3394 msecs_to_jiffies(60000) : msecs_to_jiffies(10000); 3395 else 3396 adev->compute_timeout = msecs_to_jiffies(60000); 3397 3398 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { 3399 while ((timeout_setting = strsep(&input, ",")) && 3400 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { 3401 ret = kstrtol(timeout_setting, 0, &timeout); 3402 if (ret) 3403 return ret; 3404 3405 if (timeout == 0) { 3406 index++; 3407 continue; 3408 } else if (timeout < 0) { 3409 timeout = MAX_SCHEDULE_TIMEOUT; 3410 dev_warn(adev->dev, "lockup timeout disabled"); 3411 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK); 3412 } else { 3413 timeout = msecs_to_jiffies(timeout); 3414 } 3415 3416 switch (index++) { 3417 case 0: 3418 adev->gfx_timeout = timeout; 3419 break; 3420 case 1: 3421 adev->compute_timeout = timeout; 3422 break; 3423 case 2: 3424 adev->sdma_timeout = timeout; 3425 break; 3426 case 3: 3427 adev->video_timeout = timeout; 3428 break; 3429 default: 3430 break; 3431 } 3432 } 3433 /* 3434 * There is only one value specified and 3435 * it should apply to all non-compute jobs. 3436 */ 3437 if (index == 1) { 3438 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; 3439 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) 3440 adev->compute_timeout = adev->gfx_timeout; 3441 } 3442 } 3443 3444 return ret; 3445 } 3446 3447 /** 3448 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU 3449 * 3450 * @adev: amdgpu_device pointer 3451 * 3452 * RAM direct mapped to GPU if IOMMU is not enabled or is pass through mode 3453 */ 3454 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev) 3455 { 3456 struct iommu_domain *domain; 3457 3458 domain = iommu_get_domain_for_dev(adev->dev); 3459 if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY) 3460 adev->ram_is_direct_mapped = true; 3461 } 3462 3463 static const struct attribute *amdgpu_dev_attributes[] = { 3464 &dev_attr_product_name.attr, 3465 &dev_attr_product_number.attr, 3466 &dev_attr_serial_number.attr, 3467 &dev_attr_pcie_replay_count.attr, 3468 NULL 3469 }; 3470 3471 /** 3472 * amdgpu_device_init - initialize the driver 3473 * 3474 * @adev: amdgpu_device pointer 3475 * @flags: driver flags 3476 * 3477 * Initializes the driver info and hw (all asics). 3478 * Returns 0 for success or an error on failure. 3479 * Called at driver startup. 
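 *
 * At a high level this sets up locks, work items and the MMIO mapping, runs
 * early IP init (which also reads and parses the vBIOS), posts the card if
 * needed, brings up the fence driver, mode config and the full IP init, and
 * finally registers the sysfs attributes, PMU and VGA/vga_switcheroo hooks.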
3480 */ 3481 int amdgpu_device_init(struct amdgpu_device *adev, 3482 uint32_t flags) 3483 { 3484 struct drm_device *ddev = adev_to_drm(adev); 3485 struct pci_dev *pdev = adev->pdev; 3486 int r, i; 3487 bool px = false; 3488 u32 max_MBps; 3489 3490 adev->shutdown = false; 3491 adev->flags = flags; 3492 3493 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST) 3494 adev->asic_type = amdgpu_force_asic_type; 3495 else 3496 adev->asic_type = flags & AMD_ASIC_MASK; 3497 3498 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; 3499 if (amdgpu_emu_mode == 1) 3500 adev->usec_timeout *= 10; 3501 adev->gmc.gart_size = 512 * 1024 * 1024; 3502 adev->accel_working = false; 3503 adev->num_rings = 0; 3504 adev->mman.buffer_funcs = NULL; 3505 adev->mman.buffer_funcs_ring = NULL; 3506 adev->vm_manager.vm_pte_funcs = NULL; 3507 adev->vm_manager.vm_pte_num_scheds = 0; 3508 adev->gmc.gmc_funcs = NULL; 3509 adev->harvest_ip_mask = 0x0; 3510 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); 3511 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); 3512 3513 adev->smc_rreg = &amdgpu_invalid_rreg; 3514 adev->smc_wreg = &amdgpu_invalid_wreg; 3515 adev->pcie_rreg = &amdgpu_invalid_rreg; 3516 adev->pcie_wreg = &amdgpu_invalid_wreg; 3517 adev->pciep_rreg = &amdgpu_invalid_rreg; 3518 adev->pciep_wreg = &amdgpu_invalid_wreg; 3519 adev->pcie_rreg64 = &amdgpu_invalid_rreg64; 3520 adev->pcie_wreg64 = &amdgpu_invalid_wreg64; 3521 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg; 3522 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; 3523 adev->didt_rreg = &amdgpu_invalid_rreg; 3524 adev->didt_wreg = &amdgpu_invalid_wreg; 3525 adev->gc_cac_rreg = &amdgpu_invalid_rreg; 3526 adev->gc_cac_wreg = &amdgpu_invalid_wreg; 3527 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; 3528 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; 3529 3530 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", 3531 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, 3532 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); 3533 3534 /* mutex initialization are all done here so we 3535 * can recall function without having locking issues */ 3536 mutex_init(&adev->firmware.mutex); 3537 mutex_init(&adev->pm.mutex); 3538 mutex_init(&adev->gfx.gpu_clock_mutex); 3539 mutex_init(&adev->srbm_mutex); 3540 mutex_init(&adev->gfx.pipe_reserve_mutex); 3541 mutex_init(&adev->gfx.gfx_off_mutex); 3542 mutex_init(&adev->grbm_idx_mutex); 3543 mutex_init(&adev->mn_lock); 3544 mutex_init(&adev->virt.vf_errors.lock); 3545 hash_init(adev->mn_hash); 3546 mutex_init(&adev->psp.mutex); 3547 mutex_init(&adev->notifier_lock); 3548 mutex_init(&adev->pm.stable_pstate_ctx_lock); 3549 mutex_init(&adev->benchmark_mutex); 3550 3551 amdgpu_device_init_apu_flags(adev); 3552 3553 r = amdgpu_device_check_arguments(adev); 3554 if (r) 3555 return r; 3556 3557 spin_lock_init(&adev->mmio_idx_lock); 3558 spin_lock_init(&adev->smc_idx_lock); 3559 spin_lock_init(&adev->pcie_idx_lock); 3560 spin_lock_init(&adev->uvd_ctx_idx_lock); 3561 spin_lock_init(&adev->didt_idx_lock); 3562 spin_lock_init(&adev->gc_cac_idx_lock); 3563 spin_lock_init(&adev->se_cac_idx_lock); 3564 spin_lock_init(&adev->audio_endpt_idx_lock); 3565 spin_lock_init(&adev->mm_stats.lock); 3566 3567 INIT_LIST_HEAD(&adev->shadow_list); 3568 mutex_init(&adev->shadow_list_lock); 3569 3570 INIT_LIST_HEAD(&adev->reset_list); 3571 3572 INIT_LIST_HEAD(&adev->ras_list); 3573 3574 INIT_DELAYED_WORK(&adev->delayed_init_work, 3575 
			  amdgpu_device_delayed_init_work_handler);
	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
			  amdgpu_device_delay_enable_gfx_off);

	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);

	adev->gfx.gfx_off_req_count = 1;
	adev->gfx.gfx_off_residency = 0;
	adev->gfx.gfx_off_entrycount = 0;
	adev->pm.ac_power = power_supply_is_system_supplied() > 0;

	atomic_set(&adev->throttling_logging_enabled, 1);
	/*
	 * If throttling continues, logging will be performed every minute
	 * to avoid log flooding. "-1" is subtracted since the thermal
	 * throttling interrupt comes every second. Thus, the total logging
	 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
	 * for the throttling interrupt) = 60 seconds.
	 */
	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);

	/* Register mapping */
	/* TODO: block userspace mapping of io registers */
	if (adev->asic_type >= CHIP_BONAIRE) {
		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
	} else {
		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
	}

	for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
		atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);

	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
	if (adev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);

	amdgpu_device_get_pcie_info(adev);

	if (amdgpu_mcbp)
		DRM_INFO("MCBP is enabled\n");

	/*
	 * The reset domain needs to be present early, before the XGMI hive is
	 * discovered (if any) and initialized, so that the reset sem and the
	 * in_gpu reset flag can be used early on during init and before
	 * calling RREG32.
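	 * For XGMI parts this per-device SINGLE_DEVICE domain is only
	 * temporary; it is dropped again in amdgpu_device_ip_init() once the
	 * hive is known and its shared reset domain is adopted instead.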
3626 */ 3627 adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev"); 3628 if (!adev->reset_domain) 3629 return -ENOMEM; 3630 3631 /* detect hw virtualization here */ 3632 amdgpu_detect_virtualization(adev); 3633 3634 r = amdgpu_device_get_job_timeout_settings(adev); 3635 if (r) { 3636 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n"); 3637 return r; 3638 } 3639 3640 /* early init functions */ 3641 r = amdgpu_device_ip_early_init(adev); 3642 if (r) 3643 return r; 3644 3645 /* Enable TMZ based on IP_VERSION */ 3646 amdgpu_gmc_tmz_set(adev); 3647 3648 amdgpu_gmc_noretry_set(adev); 3649 /* Need to get xgmi info early to decide the reset behavior*/ 3650 if (adev->gmc.xgmi.supported) { 3651 r = adev->gfxhub.funcs->get_xgmi_info(adev); 3652 if (r) 3653 return r; 3654 } 3655 3656 /* enable PCIE atomic ops */ 3657 if (amdgpu_sriov_vf(adev)) 3658 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *) 3659 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags == 3660 (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64); 3661 else 3662 adev->have_atomics_support = 3663 !pci_enable_atomic_ops_to_root(adev->pdev, 3664 PCI_EXP_DEVCAP2_ATOMIC_COMP32 | 3665 PCI_EXP_DEVCAP2_ATOMIC_COMP64); 3666 if (!adev->have_atomics_support) 3667 dev_info(adev->dev, "PCIE atomic ops is not supported\n"); 3668 3669 /* doorbell bar mapping and doorbell index init*/ 3670 amdgpu_device_doorbell_init(adev); 3671 3672 if (amdgpu_emu_mode == 1) { 3673 /* post the asic on emulation mode */ 3674 emu_soc_asic_init(adev); 3675 goto fence_driver_init; 3676 } 3677 3678 amdgpu_reset_init(adev); 3679 3680 /* detect if we are with an SRIOV vbios */ 3681 amdgpu_device_detect_sriov_bios(adev); 3682 3683 /* check if we need to reset the asic 3684 * E.g., driver was not cleanly unloaded previously, etc. 
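	 * For XGMI hives the reset is deferred: pending_reset is set and only
	 * the blocks the SMU needs are allowed through hw_init. A standalone
	 * GPU is reset immediately via amdgpu_asic_reset().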
3685 */ 3686 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) { 3687 if (adev->gmc.xgmi.num_physical_nodes) { 3688 dev_info(adev->dev, "Pending hive reset.\n"); 3689 adev->gmc.xgmi.pending_reset = true; 3690 /* Only need to init necessary block for SMU to handle the reset */ 3691 for (i = 0; i < adev->num_ip_blocks; i++) { 3692 if (!adev->ip_blocks[i].status.valid) 3693 continue; 3694 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || 3695 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 3696 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || 3697 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) { 3698 DRM_DEBUG("IP %s disabled for hw_init.\n", 3699 adev->ip_blocks[i].version->funcs->name); 3700 adev->ip_blocks[i].status.hw = true; 3701 } 3702 } 3703 } else { 3704 r = amdgpu_asic_reset(adev); 3705 if (r) { 3706 dev_err(adev->dev, "asic reset on init failed\n"); 3707 goto failed; 3708 } 3709 } 3710 } 3711 3712 pci_enable_pcie_error_reporting(adev->pdev); 3713 3714 /* Post card if necessary */ 3715 if (amdgpu_device_need_post(adev)) { 3716 if (!adev->bios) { 3717 dev_err(adev->dev, "no vBIOS found\n"); 3718 r = -EINVAL; 3719 goto failed; 3720 } 3721 DRM_INFO("GPU posting now...\n"); 3722 r = amdgpu_device_asic_init(adev); 3723 if (r) { 3724 dev_err(adev->dev, "gpu post error!\n"); 3725 goto failed; 3726 } 3727 } 3728 3729 if (adev->is_atom_fw) { 3730 /* Initialize clocks */ 3731 r = amdgpu_atomfirmware_get_clock_info(adev); 3732 if (r) { 3733 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n"); 3734 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); 3735 goto failed; 3736 } 3737 } else { 3738 /* Initialize clocks */ 3739 r = amdgpu_atombios_get_clock_info(adev); 3740 if (r) { 3741 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); 3742 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); 3743 goto failed; 3744 } 3745 /* init i2c buses */ 3746 if (!amdgpu_device_has_dc_support(adev)) 3747 amdgpu_atombios_i2c_init(adev); 3748 } 3749 3750 fence_driver_init: 3751 /* Fence driver */ 3752 r = amdgpu_fence_driver_sw_init(adev); 3753 if (r) { 3754 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n"); 3755 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0); 3756 goto failed; 3757 } 3758 3759 /* init the mode config */ 3760 drm_mode_config_init(adev_to_drm(adev)); 3761 3762 r = amdgpu_device_ip_init(adev); 3763 if (r) { 3764 /* failed in exclusive mode due to timeout */ 3765 if (amdgpu_sriov_vf(adev) && 3766 !amdgpu_sriov_runtime(adev) && 3767 amdgpu_virt_mmio_blocked(adev) && 3768 !amdgpu_virt_wait_reset(adev)) { 3769 dev_err(adev->dev, "VF exclusive mode timeout\n"); 3770 /* Don't send request since VF is inactive. */ 3771 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; 3772 adev->virt.ops = NULL; 3773 r = -EAGAIN; 3774 goto release_ras_con; 3775 } 3776 dev_err(adev->dev, "amdgpu_device_ip_init failed\n"); 3777 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); 3778 goto release_ras_con; 3779 } 3780 3781 amdgpu_fence_driver_hw_init(adev); 3782 3783 dev_info(adev->dev, 3784 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n", 3785 adev->gfx.config.max_shader_engines, 3786 adev->gfx.config.max_sh_per_se, 3787 adev->gfx.config.max_cu_per_sh, 3788 adev->gfx.cu_info.number); 3789 3790 adev->accel_working = true; 3791 3792 amdgpu_vm_check_compute_bug(adev); 3793 3794 /* Initialize the buffer migration limit. 
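	 * A non-negative amdgpu_moverate overrides the default of 8 MB/s, and
	 * the value is stored as a log2 for cheap divisions: a moverate of 64,
	 * for example, ends up as log2_max_MBps = 6.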
*/ 3795 if (amdgpu_moverate >= 0) 3796 max_MBps = amdgpu_moverate; 3797 else 3798 max_MBps = 8; /* Allow 8 MB/s. */ 3799 /* Get a log2 for easy divisions. */ 3800 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps)); 3801 3802 r = amdgpu_pm_sysfs_init(adev); 3803 if (r) { 3804 adev->pm_sysfs_en = false; 3805 DRM_ERROR("registering pm debugfs failed (%d).\n", r); 3806 } else 3807 adev->pm_sysfs_en = true; 3808 3809 r = amdgpu_ucode_sysfs_init(adev); 3810 if (r) { 3811 adev->ucode_sysfs_en = false; 3812 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r); 3813 } else 3814 adev->ucode_sysfs_en = true; 3815 3816 r = amdgpu_psp_sysfs_init(adev); 3817 if (r) { 3818 adev->psp_sysfs_en = false; 3819 if (!amdgpu_sriov_vf(adev)) 3820 DRM_ERROR("Creating psp sysfs failed\n"); 3821 } else 3822 adev->psp_sysfs_en = true; 3823 3824 /* 3825 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost. 3826 * Otherwise the mgpu fan boost feature will be skipped due to the 3827 * gpu instance is counted less. 3828 */ 3829 amdgpu_register_gpu_instance(adev); 3830 3831 /* enable clockgating, etc. after ib tests, etc. since some blocks require 3832 * explicit gating rather than handling it automatically. 3833 */ 3834 if (!adev->gmc.xgmi.pending_reset) { 3835 r = amdgpu_device_ip_late_init(adev); 3836 if (r) { 3837 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n"); 3838 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r); 3839 goto release_ras_con; 3840 } 3841 /* must succeed. */ 3842 amdgpu_ras_resume(adev); 3843 queue_delayed_work(system_wq, &adev->delayed_init_work, 3844 msecs_to_jiffies(AMDGPU_RESUME_MS)); 3845 } 3846 3847 if (amdgpu_sriov_vf(adev)) 3848 flush_delayed_work(&adev->delayed_init_work); 3849 3850 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes); 3851 if (r) 3852 dev_err(adev->dev, "Could not create amdgpu device attr\n"); 3853 3854 if (IS_ENABLED(CONFIG_PERF_EVENTS)) 3855 r = amdgpu_pmu_init(adev); 3856 if (r) 3857 dev_err(adev->dev, "amdgpu_pmu_init failed\n"); 3858 3859 /* Have stored pci confspace at hand for restore in sudden PCI error */ 3860 if (amdgpu_device_cache_pci_state(adev->pdev)) 3861 pci_restore_state(pdev); 3862 3863 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ 3864 /* this will fail for cards that aren't VGA class devices, just 3865 * ignore it */ 3866 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) 3867 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode); 3868 3869 if (amdgpu_device_supports_px(ddev)) { 3870 px = true; 3871 vga_switcheroo_register_client(adev->pdev, 3872 &amdgpu_switcheroo_ops, px); 3873 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); 3874 } 3875 3876 if (adev->gmc.xgmi.pending_reset) 3877 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work, 3878 msecs_to_jiffies(AMDGPU_RESUME_MS)); 3879 3880 amdgpu_device_check_iommu_direct_map(adev); 3881 3882 return 0; 3883 3884 release_ras_con: 3885 amdgpu_release_ras_context(adev); 3886 3887 failed: 3888 amdgpu_vf_error_trans_all(adev); 3889 3890 return r; 3891 } 3892 3893 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev) 3894 { 3895 3896 /* Clear all CPU mappings pointing to this device */ 3897 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1); 3898 3899 /* Unmap all mapped bars - Doorbell, registers and VRAM */ 3900 amdgpu_device_doorbell_fini(adev); 3901 3902 iounmap(adev->rmmio); 3903 adev->rmmio = NULL; 3904 if (adev->mman.aper_base_kaddr) 3905 
iounmap(adev->mman.aper_base_kaddr); 3906 adev->mman.aper_base_kaddr = NULL; 3907 3908 /* Memory manager related */ 3909 if (!adev->gmc.xgmi.connected_to_cpu) { 3910 arch_phys_wc_del(adev->gmc.vram_mtrr); 3911 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size); 3912 } 3913 } 3914 3915 /** 3916 * amdgpu_device_fini_hw - tear down the driver 3917 * 3918 * @adev: amdgpu_device pointer 3919 * 3920 * Tear down the driver info (all asics). 3921 * Called at driver shutdown. 3922 */ 3923 void amdgpu_device_fini_hw(struct amdgpu_device *adev) 3924 { 3925 dev_info(adev->dev, "amdgpu: finishing device.\n"); 3926 flush_delayed_work(&adev->delayed_init_work); 3927 adev->shutdown = true; 3928 3929 /* make sure IB test finished before entering exclusive mode 3930 * to avoid preemption on IB test 3931 * */ 3932 if (amdgpu_sriov_vf(adev)) { 3933 amdgpu_virt_request_full_gpu(adev, false); 3934 amdgpu_virt_fini_data_exchange(adev); 3935 } 3936 3937 /* disable all interrupts */ 3938 amdgpu_irq_disable_all(adev); 3939 if (adev->mode_info.mode_config_initialized){ 3940 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev))) 3941 drm_helper_force_disable_all(adev_to_drm(adev)); 3942 else 3943 drm_atomic_helper_shutdown(adev_to_drm(adev)); 3944 } 3945 amdgpu_fence_driver_hw_fini(adev); 3946 3947 if (adev->mman.initialized) { 3948 flush_delayed_work(&adev->mman.bdev.wq); 3949 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); 3950 } 3951 3952 if (adev->pm_sysfs_en) 3953 amdgpu_pm_sysfs_fini(adev); 3954 if (adev->ucode_sysfs_en) 3955 amdgpu_ucode_sysfs_fini(adev); 3956 if (adev->psp_sysfs_en) 3957 amdgpu_psp_sysfs_fini(adev); 3958 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes); 3959 3960 /* disable ras feature must before hw fini */ 3961 amdgpu_ras_pre_fini(adev); 3962 3963 amdgpu_device_ip_fini_early(adev); 3964 3965 amdgpu_irq_fini_hw(adev); 3966 3967 if (adev->mman.initialized) 3968 ttm_device_clear_dma_mappings(&adev->mman.bdev); 3969 3970 amdgpu_gart_dummy_page_fini(adev); 3971 3972 if (drm_dev_is_unplugged(adev_to_drm(adev))) 3973 amdgpu_device_unmap_mmio(adev); 3974 3975 } 3976 3977 void amdgpu_device_fini_sw(struct amdgpu_device *adev) 3978 { 3979 int idx; 3980 3981 amdgpu_fence_driver_sw_fini(adev); 3982 amdgpu_device_ip_fini(adev); 3983 release_firmware(adev->firmware.gpu_info_fw); 3984 adev->firmware.gpu_info_fw = NULL; 3985 adev->accel_working = false; 3986 3987 amdgpu_reset_fini(adev); 3988 3989 /* free i2c buses */ 3990 if (!amdgpu_device_has_dc_support(adev)) 3991 amdgpu_i2c_fini(adev); 3992 3993 if (amdgpu_emu_mode != 1) 3994 amdgpu_atombios_fini(adev); 3995 3996 kfree(adev->bios); 3997 adev->bios = NULL; 3998 if (amdgpu_device_supports_px(adev_to_drm(adev))) { 3999 vga_switcheroo_unregister_client(adev->pdev); 4000 vga_switcheroo_fini_domain_pm_ops(adev->dev); 4001 } 4002 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) 4003 vga_client_unregister(adev->pdev); 4004 4005 if (drm_dev_enter(adev_to_drm(adev), &idx)) { 4006 4007 iounmap(adev->rmmio); 4008 adev->rmmio = NULL; 4009 amdgpu_device_doorbell_fini(adev); 4010 drm_dev_exit(idx); 4011 } 4012 4013 if (IS_ENABLED(CONFIG_PERF_EVENTS)) 4014 amdgpu_pmu_fini(adev); 4015 if (adev->mman.discovery_bin) 4016 amdgpu_discovery_fini(adev); 4017 4018 amdgpu_reset_put_reset_domain(adev->reset_domain); 4019 adev->reset_domain = NULL; 4020 4021 kfree(adev->pci_state); 4022 4023 } 4024 4025 /** 4026 * amdgpu_device_evict_resources - evict device resources 4027 * @adev: amdgpu device object 4028 * 4029 * Evicts all ttm device 
resources(vram BOs, gart table) from the lru list 4030 * of the vram memory type. Mainly used for evicting device resources 4031 * at suspend time. 4032 * 4033 */ 4034 static void amdgpu_device_evict_resources(struct amdgpu_device *adev) 4035 { 4036 /* No need to evict vram on APUs for suspend to ram or s2idle */ 4037 if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU)) 4038 return; 4039 4040 if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM)) 4041 DRM_WARN("evicting device resources failed\n"); 4042 4043 } 4044 4045 /* 4046 * Suspend & resume. 4047 */ 4048 /** 4049 * amdgpu_device_suspend - initiate device suspend 4050 * 4051 * @dev: drm dev pointer 4052 * @fbcon : notify the fbdev of suspend 4053 * 4054 * Puts the hw in the suspend state (all asics). 4055 * Returns 0 for success or an error on failure. 4056 * Called at driver suspend. 4057 */ 4058 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) 4059 { 4060 struct amdgpu_device *adev = drm_to_adev(dev); 4061 4062 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 4063 return 0; 4064 4065 adev->in_suspend = true; 4066 4067 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3)) 4068 DRM_WARN("smart shift update failed\n"); 4069 4070 drm_kms_helper_poll_disable(dev); 4071 4072 if (fbcon) 4073 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true); 4074 4075 cancel_delayed_work_sync(&adev->delayed_init_work); 4076 4077 amdgpu_ras_suspend(adev); 4078 4079 amdgpu_device_ip_suspend_phase1(adev); 4080 4081 if (!adev->in_s0ix) 4082 amdgpu_amdkfd_suspend(adev, adev->in_runpm); 4083 4084 amdgpu_device_evict_resources(adev); 4085 4086 amdgpu_fence_driver_hw_fini(adev); 4087 4088 amdgpu_device_ip_suspend_phase2(adev); 4089 4090 return 0; 4091 } 4092 4093 /** 4094 * amdgpu_device_resume - initiate device resume 4095 * 4096 * @dev: drm dev pointer 4097 * @fbcon : notify the fbdev of resume 4098 * 4099 * Bring the hw back to operating state (all asics). 4100 * Returns 0 for success or an error on failure. 4101 * Called at driver resume. 4102 */ 4103 int amdgpu_device_resume(struct drm_device *dev, bool fbcon) 4104 { 4105 struct amdgpu_device *adev = drm_to_adev(dev); 4106 int r = 0; 4107 4108 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 4109 return 0; 4110 4111 if (adev->in_s0ix) 4112 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry); 4113 4114 /* post card */ 4115 if (amdgpu_device_need_post(adev)) { 4116 r = amdgpu_device_asic_init(adev); 4117 if (r) 4118 dev_err(adev->dev, "amdgpu asic init failed\n"); 4119 } 4120 4121 r = amdgpu_device_ip_resume(adev); 4122 if (r) { 4123 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r); 4124 return r; 4125 } 4126 amdgpu_fence_driver_hw_init(adev); 4127 4128 r = amdgpu_device_ip_late_init(adev); 4129 if (r) 4130 return r; 4131 4132 queue_delayed_work(system_wq, &adev->delayed_init_work, 4133 msecs_to_jiffies(AMDGPU_RESUME_MS)); 4134 4135 if (!adev->in_s0ix) { 4136 r = amdgpu_amdkfd_resume(adev, adev->in_runpm); 4137 if (r) 4138 return r; 4139 } 4140 4141 /* Make sure IB tests flushed */ 4142 flush_delayed_work(&adev->delayed_init_work); 4143 4144 if (fbcon) 4145 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false); 4146 4147 drm_kms_helper_poll_enable(dev); 4148 4149 amdgpu_ras_resume(adev); 4150 4151 /* 4152 * Most of the connector probing functions try to acquire runtime pm 4153 * refs to ensure that the GPU is powered on when connector polling is 4154 * performed. 
Since we're calling this from a runtime PM callback, 4155 * trying to acquire rpm refs will cause us to deadlock. 4156 * 4157 * Since we're guaranteed to be holding the rpm lock, it's safe to 4158 * temporarily disable the rpm helpers so this doesn't deadlock us. 4159 */ 4160 #ifdef CONFIG_PM 4161 dev->dev->power.disable_depth++; 4162 #endif 4163 if (!amdgpu_device_has_dc_support(adev)) 4164 drm_helper_hpd_irq_event(dev); 4165 else 4166 drm_kms_helper_hotplug_event(dev); 4167 #ifdef CONFIG_PM 4168 dev->dev->power.disable_depth--; 4169 #endif 4170 adev->in_suspend = false; 4171 4172 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0)) 4173 DRM_WARN("smart shift update failed\n"); 4174 4175 return 0; 4176 } 4177 4178 /** 4179 * amdgpu_device_ip_check_soft_reset - did soft reset succeed 4180 * 4181 * @adev: amdgpu_device pointer 4182 * 4183 * The list of all the hardware IPs that make up the asic is walked and 4184 * the check_soft_reset callbacks are run. check_soft_reset determines 4185 * if the asic is still hung or not. 4186 * Returns true if any of the IPs are still in a hung state, false if not. 4187 */ 4188 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev) 4189 { 4190 int i; 4191 bool asic_hang = false; 4192 4193 if (amdgpu_sriov_vf(adev)) 4194 return true; 4195 4196 if (amdgpu_asic_need_full_reset(adev)) 4197 return true; 4198 4199 for (i = 0; i < adev->num_ip_blocks; i++) { 4200 if (!adev->ip_blocks[i].status.valid) 4201 continue; 4202 if (adev->ip_blocks[i].version->funcs->check_soft_reset) 4203 adev->ip_blocks[i].status.hang = 4204 adev->ip_blocks[i].version->funcs->check_soft_reset(adev); 4205 if (adev->ip_blocks[i].status.hang) { 4206 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); 4207 asic_hang = true; 4208 } 4209 } 4210 return asic_hang; 4211 } 4212 4213 /** 4214 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset 4215 * 4216 * @adev: amdgpu_device pointer 4217 * 4218 * The list of all the hardware IPs that make up the asic is walked and the 4219 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset 4220 * handles any IP specific hardware or software state changes that are 4221 * necessary for a soft reset to succeed. 4222 * Returns 0 on success, negative error code on failure. 4223 */ 4224 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev) 4225 { 4226 int i, r = 0; 4227 4228 for (i = 0; i < adev->num_ip_blocks; i++) { 4229 if (!adev->ip_blocks[i].status.valid) 4230 continue; 4231 if (adev->ip_blocks[i].status.hang && 4232 adev->ip_blocks[i].version->funcs->pre_soft_reset) { 4233 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev); 4234 if (r) 4235 return r; 4236 } 4237 } 4238 4239 return 0; 4240 } 4241 4242 /** 4243 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed 4244 * 4245 * @adev: amdgpu_device pointer 4246 * 4247 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu 4248 * reset is necessary to recover. 4249 * Returns true if a full asic reset is required, false if not. 
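 * GMC, SMC, ACP, DCE and PSP hangs cannot be handled by a soft reset, so a
 * hang in any of those blocks forces a full ASIC reset.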
4250 */ 4251 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev) 4252 { 4253 int i; 4254 4255 if (amdgpu_asic_need_full_reset(adev)) 4256 return true; 4257 4258 for (i = 0; i < adev->num_ip_blocks; i++) { 4259 if (!adev->ip_blocks[i].status.valid) 4260 continue; 4261 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) || 4262 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) || 4263 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) || 4264 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) || 4265 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { 4266 if (adev->ip_blocks[i].status.hang) { 4267 dev_info(adev->dev, "Some block need full reset!\n"); 4268 return true; 4269 } 4270 } 4271 } 4272 return false; 4273 } 4274 4275 /** 4276 * amdgpu_device_ip_soft_reset - do a soft reset 4277 * 4278 * @adev: amdgpu_device pointer 4279 * 4280 * The list of all the hardware IPs that make up the asic is walked and the 4281 * soft_reset callbacks are run if the block is hung. soft_reset handles any 4282 * IP specific hardware or software state changes that are necessary to soft 4283 * reset the IP. 4284 * Returns 0 on success, negative error code on failure. 4285 */ 4286 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev) 4287 { 4288 int i, r = 0; 4289 4290 for (i = 0; i < adev->num_ip_blocks; i++) { 4291 if (!adev->ip_blocks[i].status.valid) 4292 continue; 4293 if (adev->ip_blocks[i].status.hang && 4294 adev->ip_blocks[i].version->funcs->soft_reset) { 4295 r = adev->ip_blocks[i].version->funcs->soft_reset(adev); 4296 if (r) 4297 return r; 4298 } 4299 } 4300 4301 return 0; 4302 } 4303 4304 /** 4305 * amdgpu_device_ip_post_soft_reset - clean up from soft reset 4306 * 4307 * @adev: amdgpu_device pointer 4308 * 4309 * The list of all the hardware IPs that make up the asic is walked and the 4310 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset 4311 * handles any IP specific hardware or software state changes that are 4312 * necessary after the IP has been soft reset. 4313 * Returns 0 on success, negative error code on failure. 4314 */ 4315 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev) 4316 { 4317 int i, r = 0; 4318 4319 for (i = 0; i < adev->num_ip_blocks; i++) { 4320 if (!adev->ip_blocks[i].status.valid) 4321 continue; 4322 if (adev->ip_blocks[i].status.hang && 4323 adev->ip_blocks[i].version->funcs->post_soft_reset) 4324 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev); 4325 if (r) 4326 return r; 4327 } 4328 4329 return 0; 4330 } 4331 4332 /** 4333 * amdgpu_device_recover_vram - Recover some VRAM contents 4334 * 4335 * @adev: amdgpu_device pointer 4336 * 4337 * Restores the contents of VRAM buffers from the shadows in GTT. Used to 4338 * restore things like GPUVM page tables after a GPU reset where 4339 * the contents of VRAM might be lost. 4340 * 4341 * Returns: 4342 * 0 on success, negative error code on failure. 
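 * Shadows that are not currently resident in GTT, or whose parent BO is no
 * longer in VRAM, are skipped; the per-fence wait timeout is 8 seconds under
 * SR-IOV runtime and 100 ms otherwise.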
4343 */ 4344 static int amdgpu_device_recover_vram(struct amdgpu_device *adev) 4345 { 4346 struct dma_fence *fence = NULL, *next = NULL; 4347 struct amdgpu_bo *shadow; 4348 struct amdgpu_bo_vm *vmbo; 4349 long r = 1, tmo; 4350 4351 if (amdgpu_sriov_runtime(adev)) 4352 tmo = msecs_to_jiffies(8000); 4353 else 4354 tmo = msecs_to_jiffies(100); 4355 4356 dev_info(adev->dev, "recover vram bo from shadow start\n"); 4357 mutex_lock(&adev->shadow_list_lock); 4358 list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) { 4359 shadow = &vmbo->bo; 4360 /* No need to recover an evicted BO */ 4361 if (shadow->tbo.resource->mem_type != TTM_PL_TT || 4362 shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET || 4363 shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM) 4364 continue; 4365 4366 r = amdgpu_bo_restore_shadow(shadow, &next); 4367 if (r) 4368 break; 4369 4370 if (fence) { 4371 tmo = dma_fence_wait_timeout(fence, false, tmo); 4372 dma_fence_put(fence); 4373 fence = next; 4374 if (tmo == 0) { 4375 r = -ETIMEDOUT; 4376 break; 4377 } else if (tmo < 0) { 4378 r = tmo; 4379 break; 4380 } 4381 } else { 4382 fence = next; 4383 } 4384 } 4385 mutex_unlock(&adev->shadow_list_lock); 4386 4387 if (fence) 4388 tmo = dma_fence_wait_timeout(fence, false, tmo); 4389 dma_fence_put(fence); 4390 4391 if (r < 0 || tmo <= 0) { 4392 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo); 4393 return -EIO; 4394 } 4395 4396 dev_info(adev->dev, "recover vram bo from shadow done\n"); 4397 return 0; 4398 } 4399 4400 4401 /** 4402 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf 4403 * 4404 * @adev: amdgpu_device pointer 4405 * @from_hypervisor: request from hypervisor 4406 * 4407 * do VF FLR and reinitialize Asic 4408 * return 0 means succeeded otherwise failed 4409 */ 4410 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, 4411 bool from_hypervisor) 4412 { 4413 int r; 4414 struct amdgpu_hive_info *hive = NULL; 4415 int retry_limit = 0; 4416 4417 retry: 4418 amdgpu_amdkfd_pre_reset(adev); 4419 4420 if (from_hypervisor) 4421 r = amdgpu_virt_request_full_gpu(adev, true); 4422 else 4423 r = amdgpu_virt_reset_gpu(adev); 4424 if (r) 4425 return r; 4426 4427 /* Resume IP prior to SMC */ 4428 r = amdgpu_device_ip_reinit_early_sriov(adev); 4429 if (r) 4430 goto error; 4431 4432 amdgpu_virt_init_data_exchange(adev); 4433 4434 r = amdgpu_device_fw_loading(adev); 4435 if (r) 4436 return r; 4437 4438 /* now we are okay to resume SMC/CP/SDMA */ 4439 r = amdgpu_device_ip_reinit_late_sriov(adev); 4440 if (r) 4441 goto error; 4442 4443 hive = amdgpu_get_xgmi_hive(adev); 4444 /* Update PSP FW topology after reset */ 4445 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) 4446 r = amdgpu_xgmi_update_topology(hive, adev); 4447 4448 if (hive) 4449 amdgpu_put_xgmi_hive(hive); 4450 4451 if (!r) { 4452 amdgpu_irq_gpu_reset_resume_helper(adev); 4453 r = amdgpu_ib_ring_tests(adev); 4454 4455 amdgpu_amdkfd_post_reset(adev); 4456 } 4457 4458 error: 4459 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { 4460 amdgpu_inc_vram_lost(adev); 4461 r = amdgpu_device_recover_vram(adev); 4462 } 4463 amdgpu_virt_release_full_gpu(adev, true); 4464 4465 if (AMDGPU_RETRY_SRIOV_RESET(r)) { 4466 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) { 4467 retry_limit++; 4468 goto retry; 4469 } else 4470 DRM_ERROR("GPU reset retry is beyond the retry limit\n"); 4471 } 4472 4473 return r; 4474 } 4475 4476 /** 4477 * amdgpu_device_has_job_running - check if there is any job in mirror list 4478 * 4479 
* @adev: amdgpu_device pointer 4480 * 4481 * check if there is any job in mirror list 4482 */ 4483 bool amdgpu_device_has_job_running(struct amdgpu_device *adev) 4484 { 4485 int i; 4486 struct drm_sched_job *job; 4487 4488 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 4489 struct amdgpu_ring *ring = adev->rings[i]; 4490 4491 if (!ring || !ring->sched.thread) 4492 continue; 4493 4494 spin_lock(&ring->sched.job_list_lock); 4495 job = list_first_entry_or_null(&ring->sched.pending_list, 4496 struct drm_sched_job, list); 4497 spin_unlock(&ring->sched.job_list_lock); 4498 if (job) 4499 return true; 4500 } 4501 return false; 4502 } 4503 4504 /** 4505 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery 4506 * 4507 * @adev: amdgpu_device pointer 4508 * 4509 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover 4510 * a hung GPU. 4511 */ 4512 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) 4513 { 4514 4515 if (amdgpu_gpu_recovery == 0) 4516 goto disabled; 4517 4518 if (!amdgpu_device_ip_check_soft_reset(adev)) { 4519 dev_info(adev->dev,"Timeout, but no hardware hang detected.\n"); 4520 return false; 4521 } 4522 4523 if (amdgpu_sriov_vf(adev)) 4524 return true; 4525 4526 if (amdgpu_gpu_recovery == -1) { 4527 switch (adev->asic_type) { 4528 #ifdef CONFIG_DRM_AMDGPU_SI 4529 case CHIP_VERDE: 4530 case CHIP_TAHITI: 4531 case CHIP_PITCAIRN: 4532 case CHIP_OLAND: 4533 case CHIP_HAINAN: 4534 #endif 4535 #ifdef CONFIG_DRM_AMDGPU_CIK 4536 case CHIP_KAVERI: 4537 case CHIP_KABINI: 4538 case CHIP_MULLINS: 4539 #endif 4540 case CHIP_CARRIZO: 4541 case CHIP_STONEY: 4542 case CHIP_CYAN_SKILLFISH: 4543 goto disabled; 4544 default: 4545 break; 4546 } 4547 } 4548 4549 return true; 4550 4551 disabled: 4552 dev_info(adev->dev, "GPU recovery disabled.\n"); 4553 return false; 4554 } 4555 4556 int amdgpu_device_mode1_reset(struct amdgpu_device *adev) 4557 { 4558 u32 i; 4559 int ret = 0; 4560 4561 amdgpu_atombios_scratch_regs_engine_hung(adev, true); 4562 4563 dev_info(adev->dev, "GPU mode1 reset\n"); 4564 4565 /* disable BM */ 4566 pci_clear_master(adev->pdev); 4567 4568 amdgpu_device_cache_pci_state(adev->pdev); 4569 4570 if (amdgpu_dpm_is_mode1_reset_supported(adev)) { 4571 dev_info(adev->dev, "GPU smu mode1 reset\n"); 4572 ret = amdgpu_dpm_mode1_reset(adev); 4573 } else { 4574 dev_info(adev->dev, "GPU psp mode1 reset\n"); 4575 ret = psp_gpu_reset(adev); 4576 } 4577 4578 if (ret) 4579 dev_err(adev->dev, "GPU mode1 reset failed\n"); 4580 4581 amdgpu_device_load_pci_state(adev->pdev); 4582 4583 /* wait for asic to come out of reset */ 4584 for (i = 0; i < adev->usec_timeout; i++) { 4585 u32 memsize = adev->nbio.funcs->get_memsize(adev); 4586 4587 if (memsize != 0xffffffff) 4588 break; 4589 udelay(1); 4590 } 4591 4592 amdgpu_atombios_scratch_regs_engine_hung(adev, false); 4593 return ret; 4594 } 4595 4596 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, 4597 struct amdgpu_reset_context *reset_context) 4598 { 4599 int i, r = 0; 4600 struct amdgpu_job *job = NULL; 4601 bool need_full_reset = 4602 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 4603 4604 if (reset_context->reset_req_dev == adev) 4605 job = reset_context->job; 4606 4607 if (amdgpu_sriov_vf(adev)) { 4608 /* stop the data exchange thread */ 4609 amdgpu_virt_fini_data_exchange(adev); 4610 } 4611 4612 amdgpu_fence_driver_isr_toggle(adev, true); 4613 4614 /* block all schedulers and reset given job's ring */ 4615 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 4616 struct amdgpu_ring *ring = 
adev->rings[i]; 4617 4618 if (!ring || !ring->sched.thread) 4619 continue; 4620 4621 /*clear job fence from fence drv to avoid force_completion 4622 *leave NULL and vm flush fence in fence drv */ 4623 amdgpu_fence_driver_clear_job_fences(ring); 4624 4625 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ 4626 amdgpu_fence_driver_force_completion(ring); 4627 } 4628 4629 amdgpu_fence_driver_isr_toggle(adev, false); 4630 4631 if (job && job->vm) 4632 drm_sched_increase_karma(&job->base); 4633 4634 r = amdgpu_reset_prepare_hwcontext(adev, reset_context); 4635 /* If reset handler not implemented, continue; otherwise return */ 4636 if (r == -ENOSYS) 4637 r = 0; 4638 else 4639 return r; 4640 4641 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */ 4642 if (!amdgpu_sriov_vf(adev)) { 4643 4644 if (!need_full_reset) 4645 need_full_reset = amdgpu_device_ip_need_full_reset(adev); 4646 4647 if (!need_full_reset && amdgpu_gpu_recovery) { 4648 amdgpu_device_ip_pre_soft_reset(adev); 4649 r = amdgpu_device_ip_soft_reset(adev); 4650 amdgpu_device_ip_post_soft_reset(adev); 4651 if (r || amdgpu_device_ip_check_soft_reset(adev)) { 4652 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n"); 4653 need_full_reset = true; 4654 } 4655 } 4656 4657 if (need_full_reset) 4658 r = amdgpu_device_ip_suspend(adev); 4659 if (need_full_reset) 4660 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 4661 else 4662 clear_bit(AMDGPU_NEED_FULL_RESET, 4663 &reset_context->flags); 4664 } 4665 4666 return r; 4667 } 4668 4669 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev) 4670 { 4671 int i; 4672 4673 lockdep_assert_held(&adev->reset_domain->sem); 4674 4675 for (i = 0; i < adev->num_regs; i++) { 4676 adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]); 4677 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], 4678 adev->reset_dump_reg_value[i]); 4679 } 4680 4681 return 0; 4682 } 4683 4684 #ifdef CONFIG_DEV_COREDUMP 4685 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset, 4686 size_t count, void *data, size_t datalen) 4687 { 4688 struct drm_printer p; 4689 struct amdgpu_device *adev = data; 4690 struct drm_print_iterator iter; 4691 int i; 4692 4693 iter.data = buffer; 4694 iter.offset = 0; 4695 iter.start = offset; 4696 iter.remain = count; 4697 4698 p = drm_coredump_printer(&iter); 4699 4700 drm_printf(&p, "**** AMDGPU Device Coredump ****\n"); 4701 drm_printf(&p, "kernel: " UTS_RELEASE "\n"); 4702 drm_printf(&p, "module: " KBUILD_MODNAME "\n"); 4703 drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec); 4704 if (adev->reset_task_info.pid) 4705 drm_printf(&p, "process_name: %s PID: %d\n", 4706 adev->reset_task_info.process_name, 4707 adev->reset_task_info.pid); 4708 4709 if (adev->reset_vram_lost) 4710 drm_printf(&p, "VRAM is lost due to GPU reset!\n"); 4711 if (adev->num_regs) { 4712 drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n"); 4713 4714 for (i = 0; i < adev->num_regs; i++) 4715 drm_printf(&p, "0x%08x: 0x%08x\n", 4716 adev->reset_dump_reg_list[i], 4717 adev->reset_dump_reg_value[i]); 4718 } 4719 4720 return count - iter.remain; 4721 } 4722 4723 static void amdgpu_devcoredump_free(void *data) 4724 { 4725 } 4726 4727 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev) 4728 { 4729 struct drm_device *dev = adev_to_drm(adev); 4730 4731 ktime_get_ts64(&adev->reset_time); 4732 dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL, 4733 
amdgpu_devcoredump_read, amdgpu_devcoredump_free); 4734 } 4735 #endif 4736 4737 int amdgpu_do_asic_reset(struct list_head *device_list_handle, 4738 struct amdgpu_reset_context *reset_context) 4739 { 4740 struct amdgpu_device *tmp_adev = NULL; 4741 bool need_full_reset, skip_hw_reset, vram_lost = false; 4742 int r = 0; 4743 4744 /* Try reset handler method first */ 4745 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, 4746 reset_list); 4747 amdgpu_reset_reg_dumps(tmp_adev); 4748 4749 reset_context->reset_device_list = device_list_handle; 4750 r = amdgpu_reset_perform_reset(tmp_adev, reset_context); 4751 /* If reset handler not implemented, continue; otherwise return */ 4752 if (r == -ENOSYS) 4753 r = 0; 4754 else 4755 return r; 4756 4757 /* Reset handler not implemented, use the default method */ 4758 need_full_reset = 4759 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 4760 skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags); 4761 4762 /* 4763 * ASIC reset has to be done on all XGMI hive nodes ASAP 4764 * to allow proper links negotiation in FW (within 1 sec) 4765 */ 4766 if (!skip_hw_reset && need_full_reset) { 4767 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 4768 /* For XGMI run all resets in parallel to speed up the process */ 4769 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { 4770 tmp_adev->gmc.xgmi.pending_reset = false; 4771 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work)) 4772 r = -EALREADY; 4773 } else 4774 r = amdgpu_asic_reset(tmp_adev); 4775 4776 if (r) { 4777 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s", 4778 r, adev_to_drm(tmp_adev)->unique); 4779 break; 4780 } 4781 } 4782 4783 /* For XGMI wait for all resets to complete before proceed */ 4784 if (!r) { 4785 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 4786 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { 4787 flush_work(&tmp_adev->xgmi_reset_work); 4788 r = tmp_adev->asic_reset_res; 4789 if (r) 4790 break; 4791 } 4792 } 4793 } 4794 } 4795 4796 if (!r && amdgpu_ras_intr_triggered()) { 4797 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 4798 if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops && 4799 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count) 4800 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev); 4801 } 4802 4803 amdgpu_ras_intr_cleared(); 4804 } 4805 4806 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 4807 if (need_full_reset) { 4808 /* post card */ 4809 r = amdgpu_device_asic_init(tmp_adev); 4810 if (r) { 4811 dev_warn(tmp_adev->dev, "asic atom init failed!"); 4812 } else { 4813 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n"); 4814 r = amdgpu_amdkfd_resume_iommu(tmp_adev); 4815 if (r) 4816 goto out; 4817 4818 r = amdgpu_device_ip_resume_phase1(tmp_adev); 4819 if (r) 4820 goto out; 4821 4822 vram_lost = amdgpu_device_check_vram_lost(tmp_adev); 4823 #ifdef CONFIG_DEV_COREDUMP 4824 tmp_adev->reset_vram_lost = vram_lost; 4825 memset(&tmp_adev->reset_task_info, 0, 4826 sizeof(tmp_adev->reset_task_info)); 4827 if (reset_context->job && reset_context->job->vm) 4828 tmp_adev->reset_task_info = 4829 reset_context->job->vm->task_info; 4830 amdgpu_reset_capture_coredumpm(tmp_adev); 4831 #endif 4832 if (vram_lost) { 4833 DRM_INFO("VRAM is lost due to GPU reset!\n"); 4834 amdgpu_inc_vram_lost(tmp_adev); 4835 } 4836 4837 r = amdgpu_device_fw_loading(tmp_adev); 4838 if (r) 4839 return r; 4840 4841 r = 
amdgpu_device_ip_resume_phase2(tmp_adev); 4842 if (r) 4843 goto out; 4844 4845 if (vram_lost) 4846 amdgpu_device_fill_reset_magic(tmp_adev); 4847 4848 /* 4849 * Add this ASIC as tracked as reset was already 4850 * complete successfully. 4851 */ 4852 amdgpu_register_gpu_instance(tmp_adev); 4853 4854 if (!reset_context->hive && 4855 tmp_adev->gmc.xgmi.num_physical_nodes > 1) 4856 amdgpu_xgmi_add_device(tmp_adev); 4857 4858 r = amdgpu_device_ip_late_init(tmp_adev); 4859 if (r) 4860 goto out; 4861 4862 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false); 4863 4864 /* 4865 * The GPU enters bad state once faulty pages 4866 * by ECC has reached the threshold, and ras 4867 * recovery is scheduled next. So add one check 4868 * here to break recovery if it indeed exceeds 4869 * bad page threshold, and remind user to 4870 * retire this GPU or setting one bigger 4871 * bad_page_threshold value to fix this once 4872 * probing driver again. 4873 */ 4874 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) { 4875 /* must succeed. */ 4876 amdgpu_ras_resume(tmp_adev); 4877 } else { 4878 r = -EINVAL; 4879 goto out; 4880 } 4881 4882 /* Update PSP FW topology after reset */ 4883 if (reset_context->hive && 4884 tmp_adev->gmc.xgmi.num_physical_nodes > 1) 4885 r = amdgpu_xgmi_update_topology( 4886 reset_context->hive, tmp_adev); 4887 } 4888 } 4889 4890 out: 4891 if (!r) { 4892 amdgpu_irq_gpu_reset_resume_helper(tmp_adev); 4893 r = amdgpu_ib_ring_tests(tmp_adev); 4894 if (r) { 4895 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r); 4896 need_full_reset = true; 4897 r = -EAGAIN; 4898 goto end; 4899 } 4900 } 4901 4902 if (!r) 4903 r = amdgpu_device_recover_vram(tmp_adev); 4904 else 4905 tmp_adev->asic_reset_res = r; 4906 } 4907 4908 end: 4909 if (need_full_reset) 4910 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 4911 else 4912 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 4913 return r; 4914 } 4915 4916 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev) 4917 { 4918 4919 switch (amdgpu_asic_reset_method(adev)) { 4920 case AMD_RESET_METHOD_MODE1: 4921 adev->mp1_state = PP_MP1_STATE_SHUTDOWN; 4922 break; 4923 case AMD_RESET_METHOD_MODE2: 4924 adev->mp1_state = PP_MP1_STATE_RESET; 4925 break; 4926 default: 4927 adev->mp1_state = PP_MP1_STATE_NONE; 4928 break; 4929 } 4930 } 4931 4932 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev) 4933 { 4934 amdgpu_vf_error_trans_all(adev); 4935 adev->mp1_state = PP_MP1_STATE_NONE; 4936 } 4937 4938 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev) 4939 { 4940 struct pci_dev *p = NULL; 4941 4942 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), 4943 adev->pdev->bus->number, 1); 4944 if (p) { 4945 pm_runtime_enable(&(p->dev)); 4946 pm_runtime_resume(&(p->dev)); 4947 } 4948 } 4949 4950 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev) 4951 { 4952 enum amd_reset_method reset_method; 4953 struct pci_dev *p = NULL; 4954 u64 expires; 4955 4956 /* 4957 * For now, only BACO and mode1 reset are confirmed 4958 * to suffer the audio issue without proper suspended. 
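	 * The audio function shares the GPU's power domain; if it is not
	 * runtime suspended first, the GPU reset can power it off behind
	 * the audio driver's back and trigger codec errors.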
	 */
	reset_method = amdgpu_asic_reset_method(adev);
	if ((reset_method != AMD_RESET_METHOD_BACO) &&
	    (reset_method != AMD_RESET_METHOD_MODE1))
		return -EINVAL;

	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
			adev->pdev->bus->number, 1);
	if (!p)
		return -ENODEV;

	expires = pm_runtime_autosuspend_expiration(&(p->dev));
	if (!expires)
		/*
		 * If we cannot get the audio device autosuspend delay,
		 * a fixed 4S interval will be used. Since 3S is the
		 * audio controller's default autosuspend delay, the 4S
		 * used here is guaranteed to cover it.
		 */
		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;

	while (!pm_runtime_status_suspended(&(p->dev))) {
		if (!pm_runtime_suspend(&(p->dev)))
			break;

		if (expires < ktime_get_mono_fast_ns()) {
			dev_warn(adev->dev, "failed to suspend display audio\n");
			/* TODO: abort the succeeding gpu reset? */
			return -ETIMEDOUT;
		}
	}

	pm_runtime_disable(&(p->dev));

	return 0;
}

static void amdgpu_device_recheck_guilty_jobs(
	struct amdgpu_device *adev, struct list_head *device_list_handle,
	struct amdgpu_reset_context *reset_context)
{
	int i, r = 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		int ret = 0;
		struct drm_sched_job *s_job;

		if (!ring || !ring->sched.thread)
			continue;

		s_job = list_first_entry_or_null(&ring->sched.pending_list,
				struct drm_sched_job, list);
		if (s_job == NULL)
			continue;

		/* clear the job's guilty flag and rely on the following step to decide the real one */
		drm_sched_reset_karma(s_job);
		drm_sched_resubmit_jobs_ext(&ring->sched, 1);

		if (!s_job->s_fence->parent) {
			DRM_WARN("Failed to get a HW fence for job!");
			continue;
		}

		ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
		if (ret == 0) { /* timeout */
			DRM_ERROR("Found the real bad job!
ring:%s, job_id:%llx\n", 5027 ring->sched.name, s_job->id); 5028 5029 5030 amdgpu_fence_driver_isr_toggle(adev, true); 5031 5032 /* Clear this failed job from fence array */ 5033 amdgpu_fence_driver_clear_job_fences(ring); 5034 5035 amdgpu_fence_driver_isr_toggle(adev, false); 5036 5037 /* Since the job won't signal and we go for 5038 * another resubmit drop this parent pointer 5039 */ 5040 dma_fence_put(s_job->s_fence->parent); 5041 s_job->s_fence->parent = NULL; 5042 5043 /* set guilty */ 5044 drm_sched_increase_karma(s_job); 5045 amdgpu_reset_prepare_hwcontext(adev, reset_context); 5046 retry: 5047 /* do hw reset */ 5048 if (amdgpu_sriov_vf(adev)) { 5049 amdgpu_virt_fini_data_exchange(adev); 5050 r = amdgpu_device_reset_sriov(adev, false); 5051 if (r) 5052 adev->asic_reset_res = r; 5053 } else { 5054 clear_bit(AMDGPU_SKIP_HW_RESET, 5055 &reset_context->flags); 5056 r = amdgpu_do_asic_reset(device_list_handle, 5057 reset_context); 5058 if (r && r == -EAGAIN) 5059 goto retry; 5060 } 5061 5062 /* 5063 * add reset counter so that the following 5064 * resubmitted job could flush vmid 5065 */ 5066 atomic_inc(&adev->gpu_reset_counter); 5067 continue; 5068 } 5069 5070 /* got the hw fence, signal finished fence */ 5071 atomic_dec(ring->sched.score); 5072 dma_fence_get(&s_job->s_fence->finished); 5073 dma_fence_signal(&s_job->s_fence->finished); 5074 dma_fence_put(&s_job->s_fence->finished); 5075 5076 /* remove node from list and free the job */ 5077 spin_lock(&ring->sched.job_list_lock); 5078 list_del_init(&s_job->list); 5079 spin_unlock(&ring->sched.job_list_lock); 5080 ring->sched.ops->free_job(s_job); 5081 } 5082 } 5083 5084 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev) 5085 { 5086 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 5087 5088 #if defined(CONFIG_DEBUG_FS) 5089 if (!amdgpu_sriov_vf(adev)) 5090 cancel_work(&adev->reset_work); 5091 #endif 5092 5093 if (adev->kfd.dev) 5094 cancel_work(&adev->kfd.reset_work); 5095 5096 if (amdgpu_sriov_vf(adev)) 5097 cancel_work(&adev->virt.flr_work); 5098 5099 if (con && adev->ras_enabled) 5100 cancel_work(&con->recovery_work); 5101 5102 } 5103 5104 5105 /** 5106 * amdgpu_device_gpu_recover - reset the asic and recover scheduler 5107 * 5108 * @adev: amdgpu_device pointer 5109 * @job: which job trigger hang 5110 * 5111 * Attempt to reset the GPU if it has hung (all asics). 5112 * Attempt to do soft-reset or full-reset and reinitialize Asic 5113 * Returns 0 for success or an error on failure. 5114 */ 5115 5116 int amdgpu_device_gpu_recover(struct amdgpu_device *adev, 5117 struct amdgpu_job *job, 5118 struct amdgpu_reset_context *reset_context) 5119 { 5120 struct list_head device_list, *device_list_handle = NULL; 5121 bool job_signaled = false; 5122 struct amdgpu_hive_info *hive = NULL; 5123 struct amdgpu_device *tmp_adev = NULL; 5124 int i, r = 0; 5125 bool need_emergency_restart = false; 5126 bool audio_suspended = false; 5127 int tmp_vram_lost_counter; 5128 5129 /* 5130 * Special case: RAS triggered and full reset isn't supported 5131 */ 5132 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev); 5133 5134 /* 5135 * Flush RAM to disk so that after reboot 5136 * the user can read log and see why the system rebooted. 5137 */ 5138 if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) { 5139 DRM_WARN("Emergency reboot."); 5140 5141 ksys_sync_helper(); 5142 emergency_restart(); 5143 } 5144 5145 dev_info(adev->dev, "GPU %s begin!\n", 5146 need_emergency_restart ? 
"jobs stop":"reset"); 5147 5148 if (!amdgpu_sriov_vf(adev)) 5149 hive = amdgpu_get_xgmi_hive(adev); 5150 if (hive) 5151 mutex_lock(&hive->hive_lock); 5152 5153 reset_context->job = job; 5154 reset_context->hive = hive; 5155 5156 /* 5157 * Build list of devices to reset. 5158 * In case we are in XGMI hive mode, resort the device list 5159 * to put adev in the 1st position. 5160 */ 5161 INIT_LIST_HEAD(&device_list); 5162 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) { 5163 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) 5164 list_add_tail(&tmp_adev->reset_list, &device_list); 5165 if (!list_is_first(&adev->reset_list, &device_list)) 5166 list_rotate_to_front(&adev->reset_list, &device_list); 5167 device_list_handle = &device_list; 5168 } else { 5169 list_add_tail(&adev->reset_list, &device_list); 5170 device_list_handle = &device_list; 5171 } 5172 5173 /* We need to lock reset domain only once both for XGMI and single device */ 5174 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, 5175 reset_list); 5176 amdgpu_device_lock_reset_domain(tmp_adev->reset_domain); 5177 5178 /* block all schedulers and reset given job's ring */ 5179 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 5180 5181 amdgpu_device_set_mp1_state(tmp_adev); 5182 5183 /* 5184 * Try to put the audio codec into suspend state 5185 * before gpu reset started. 5186 * 5187 * Due to the power domain of the graphics device 5188 * is shared with AZ power domain. Without this, 5189 * we may change the audio hardware from behind 5190 * the audio driver's back. That will trigger 5191 * some audio codec errors. 5192 */ 5193 if (!amdgpu_device_suspend_display_audio(tmp_adev)) 5194 audio_suspended = true; 5195 5196 amdgpu_ras_set_error_query_ready(tmp_adev, false); 5197 5198 cancel_delayed_work_sync(&tmp_adev->delayed_init_work); 5199 5200 if (!amdgpu_sriov_vf(tmp_adev)) 5201 amdgpu_amdkfd_pre_reset(tmp_adev); 5202 5203 /* 5204 * Mark these ASICs to be reseted as untracked first 5205 * And add them back after reset completed 5206 */ 5207 amdgpu_unregister_gpu_instance(tmp_adev); 5208 5209 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true); 5210 5211 /* disable ras on ALL IPs */ 5212 if (!need_emergency_restart && 5213 amdgpu_device_ip_need_full_reset(tmp_adev)) 5214 amdgpu_ras_suspend(tmp_adev); 5215 5216 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 5217 struct amdgpu_ring *ring = tmp_adev->rings[i]; 5218 5219 if (!ring || !ring->sched.thread) 5220 continue; 5221 5222 drm_sched_stop(&ring->sched, job ? &job->base : NULL); 5223 5224 if (need_emergency_restart) 5225 amdgpu_job_stop_all_jobs_on_sched(&ring->sched); 5226 } 5227 atomic_inc(&tmp_adev->gpu_reset_counter); 5228 } 5229 5230 if (need_emergency_restart) 5231 goto skip_sched_resume; 5232 5233 /* 5234 * Must check guilty signal here since after this point all old 5235 * HW fences are force signaled. 5236 * 5237 * job->base holds a reference to parent fence 5238 */ 5239 if (job && dma_fence_is_signaled(&job->hw_fence)) { 5240 job_signaled = true; 5241 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset"); 5242 goto skip_hw_reset; 5243 } 5244 5245 retry: /* Rest of adevs pre asic reset from XGMI hive. 
*/ 5246 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 5247 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context); 5248 /*TODO Should we stop ?*/ 5249 if (r) { 5250 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ", 5251 r, adev_to_drm(tmp_adev)->unique); 5252 tmp_adev->asic_reset_res = r; 5253 } 5254 5255 /* 5256 * Drop all pending non scheduler resets. Scheduler resets 5257 * were already dropped during drm_sched_stop 5258 */ 5259 amdgpu_device_stop_pending_resets(tmp_adev); 5260 } 5261 5262 tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter)); 5263 /* Actual ASIC resets if needed.*/ 5264 /* Host driver will handle XGMI hive reset for SRIOV */ 5265 if (amdgpu_sriov_vf(adev)) { 5266 r = amdgpu_device_reset_sriov(adev, job ? false : true); 5267 if (r) 5268 adev->asic_reset_res = r; 5269 5270 /* Aldebaran supports ras in SRIOV, so need resume ras during reset */ 5271 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) 5272 amdgpu_ras_resume(adev); 5273 } else { 5274 r = amdgpu_do_asic_reset(device_list_handle, reset_context); 5275 if (r && r == -EAGAIN) { 5276 set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags); 5277 adev->asic_reset_res = 0; 5278 goto retry; 5279 } 5280 } 5281 5282 skip_hw_reset: 5283 5284 /* Post ASIC reset for all devs .*/ 5285 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 5286 5287 /* 5288 * Sometimes a later bad compute job can block a good gfx job as gfx 5289 * and compute ring share internal GC HW mutually. We add an additional 5290 * guilty jobs recheck step to find the real guilty job, it synchronously 5291 * submits and pends for the first job being signaled. If it gets timeout, 5292 * we identify it as a real guilty job. 5293 */ 5294 if (amdgpu_gpu_recovery == 2 && 5295 !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter))) 5296 amdgpu_device_recheck_guilty_jobs( 5297 tmp_adev, device_list_handle, reset_context); 5298 5299 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 5300 struct amdgpu_ring *ring = tmp_adev->rings[i]; 5301 5302 if (!ring || !ring->sched.thread) 5303 continue; 5304 5305 /* No point to resubmit jobs if we didn't HW reset*/ 5306 if (!tmp_adev->asic_reset_res && !job_signaled) 5307 drm_sched_resubmit_jobs(&ring->sched); 5308 5309 drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res); 5310 } 5311 5312 if (adev->enable_mes) 5313 amdgpu_mes_self_test(tmp_adev); 5314 5315 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) { 5316 drm_helper_resume_force_mode(adev_to_drm(tmp_adev)); 5317 } 5318 5319 if (tmp_adev->asic_reset_res) 5320 r = tmp_adev->asic_reset_res; 5321 5322 tmp_adev->asic_reset_res = 0; 5323 5324 if (r) { 5325 /* bad news, how to tell it to userspace ? 
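			 * For now we just log the failure and record a VF
			 * error event (see the dev_info()/amdgpu_vf_error_put()
			 * calls below).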
			 */
			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
		} else {
			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
				DRM_WARN("smart shift update failed\n");
		}
	}

skip_sched_resume:
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		/* unlock kfd: SRIOV would do it separately */
		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
			amdgpu_amdkfd_post_reset(tmp_adev);

		/* kfd_post_reset will do nothing if kfd device is not initialized,
		 * so bring up kfd here if it has not been initialized before.
		 */
		if (!adev->kfd.init_complete)
			amdgpu_amdkfd_device_init(adev);

		if (audio_suspended)
			amdgpu_device_resume_display_audio(tmp_adev);

		amdgpu_device_unset_mp1_state(tmp_adev);
	}

	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
				    reset_list);
	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);

	if (hive) {
		mutex_unlock(&hive->hive_lock);
		amdgpu_put_xgmi_hive(hive);
	}

	if (r)
		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);

	atomic_set(&adev->reset_domain->reset_res, r);
	return r;
}

/**
 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches and stores in the driver the PCIE capabilities (gen speed
 * and lanes) of the slot the device is in. Handles APUs and
 * virtualized environments where PCIE config space may not be available.
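 * The computed masks are cached in adev->pm.pcie_gen_mask and
 * adev->pm.pcie_mlw_mask and can be overridden via the amdgpu_pcie_gen_cap
 * and amdgpu_pcie_lane_cap module parameters.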
5377 */ 5378 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev) 5379 { 5380 struct pci_dev *pdev; 5381 enum pci_bus_speed speed_cap, platform_speed_cap; 5382 enum pcie_link_width platform_link_width; 5383 5384 if (amdgpu_pcie_gen_cap) 5385 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap; 5386 5387 if (amdgpu_pcie_lane_cap) 5388 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap; 5389 5390 /* covers APUs as well */ 5391 if (pci_is_root_bus(adev->pdev->bus)) { 5392 if (adev->pm.pcie_gen_mask == 0) 5393 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK; 5394 if (adev->pm.pcie_mlw_mask == 0) 5395 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK; 5396 return; 5397 } 5398 5399 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask) 5400 return; 5401 5402 pcie_bandwidth_available(adev->pdev, NULL, 5403 &platform_speed_cap, &platform_link_width); 5404 5405 if (adev->pm.pcie_gen_mask == 0) { 5406 /* asic caps */ 5407 pdev = adev->pdev; 5408 speed_cap = pcie_get_speed_cap(pdev); 5409 if (speed_cap == PCI_SPEED_UNKNOWN) { 5410 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5411 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | 5412 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3); 5413 } else { 5414 if (speed_cap == PCIE_SPEED_32_0GT) 5415 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5416 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | 5417 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 | 5418 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 | 5419 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5); 5420 else if (speed_cap == PCIE_SPEED_16_0GT) 5421 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5422 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | 5423 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 | 5424 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4); 5425 else if (speed_cap == PCIE_SPEED_8_0GT) 5426 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5427 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | 5428 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3); 5429 else if (speed_cap == PCIE_SPEED_5_0GT) 5430 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5431 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2); 5432 else 5433 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1; 5434 } 5435 /* platform caps */ 5436 if (platform_speed_cap == PCI_SPEED_UNKNOWN) { 5437 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5438 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2); 5439 } else { 5440 if (platform_speed_cap == PCIE_SPEED_32_0GT) 5441 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5442 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | 5443 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 | 5444 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 | 5445 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5); 5446 else if (platform_speed_cap == PCIE_SPEED_16_0GT) 5447 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5448 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | 5449 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 | 5450 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4); 5451 else if (platform_speed_cap == PCIE_SPEED_8_0GT) 5452 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5453 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | 5454 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3); 5455 else if (platform_speed_cap == PCIE_SPEED_5_0GT) 5456 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | 5457 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2); 5458 else 5459 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1; 5460 5461 } 5462 } 5463 if (adev->pm.pcie_mlw_mask == 0) { 5464 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) { 5465 adev->pm.pcie_mlw_mask 
|= AMDGPU_DEFAULT_PCIE_MLW_MASK; 5466 } else { 5467 switch (platform_link_width) { 5468 case PCIE_LNK_X32: 5469 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | 5470 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | 5471 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 5472 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 5473 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 5474 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 5475 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 5476 break; 5477 case PCIE_LNK_X16: 5478 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | 5479 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 5480 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 5481 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 5482 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 5483 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 5484 break; 5485 case PCIE_LNK_X12: 5486 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 5487 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 5488 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 5489 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 5490 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 5491 break; 5492 case PCIE_LNK_X8: 5493 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 5494 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 5495 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 5496 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 5497 break; 5498 case PCIE_LNK_X4: 5499 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 5500 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 5501 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 5502 break; 5503 case PCIE_LNK_X2: 5504 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 5505 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 5506 break; 5507 case PCIE_LNK_X1: 5508 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; 5509 break; 5510 default: 5511 break; 5512 } 5513 } 5514 } 5515 } 5516 5517 /** 5518 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR 5519 * 5520 * @adev: amdgpu_device pointer 5521 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev 5522 * 5523 * Return true if @peer_adev can access (DMA) @adev through the PCIe 5524 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of 5525 * @peer_adev. 5526 */ 5527 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, 5528 struct amdgpu_device *peer_adev) 5529 { 5530 #ifdef CONFIG_HSA_AMD_P2P 5531 uint64_t address_mask = peer_adev->dev->dma_mask ? 
5532 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1); 5533 resource_size_t aper_limit = 5534 adev->gmc.aper_base + adev->gmc.aper_size - 1; 5535 bool p2p_access = !adev->gmc.xgmi.connected_to_cpu && 5536 !(pci_p2pdma_distance_many(adev->pdev, 5537 &peer_adev->dev, 1, true) < 0); 5538 5539 return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size && 5540 adev->gmc.real_vram_size == adev->gmc.visible_vram_size && 5541 !(adev->gmc.aper_base & address_mask || 5542 aper_limit & address_mask)); 5543 #else 5544 return false; 5545 #endif 5546 } 5547 5548 int amdgpu_device_baco_enter(struct drm_device *dev) 5549 { 5550 struct amdgpu_device *adev = drm_to_adev(dev); 5551 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 5552 5553 if (!amdgpu_device_supports_baco(adev_to_drm(adev))) 5554 return -ENOTSUPP; 5555 5556 if (ras && adev->ras_enabled && 5557 adev->nbio.funcs->enable_doorbell_interrupt) 5558 adev->nbio.funcs->enable_doorbell_interrupt(adev, false); 5559 5560 return amdgpu_dpm_baco_enter(adev); 5561 } 5562 5563 int amdgpu_device_baco_exit(struct drm_device *dev) 5564 { 5565 struct amdgpu_device *adev = drm_to_adev(dev); 5566 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 5567 int ret = 0; 5568 5569 if (!amdgpu_device_supports_baco(adev_to_drm(adev))) 5570 return -ENOTSUPP; 5571 5572 ret = amdgpu_dpm_baco_exit(adev); 5573 if (ret) 5574 return ret; 5575 5576 if (ras && adev->ras_enabled && 5577 adev->nbio.funcs->enable_doorbell_interrupt) 5578 adev->nbio.funcs->enable_doorbell_interrupt(adev, true); 5579 5580 if (amdgpu_passthrough(adev) && 5581 adev->nbio.funcs->clear_doorbell_interrupt) 5582 adev->nbio.funcs->clear_doorbell_interrupt(adev); 5583 5584 return 0; 5585 } 5586 5587 /** 5588 * amdgpu_pci_error_detected - Called when a PCI error is detected. 5589 * @pdev: PCI device struct 5590 * @state: PCI channel state 5591 * 5592 * Description: Called when a PCI error is detected. 5593 * 5594 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT. 
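 * pci_channel_io_normal is recoverable; pci_channel_io_frozen locks the reset
 * domain and stops all schedulers in preparation for a slot reset;
 * pci_channel_io_perm_failure disconnects the device.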
5595 */ 5596 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 5597 { 5598 struct drm_device *dev = pci_get_drvdata(pdev); 5599 struct amdgpu_device *adev = drm_to_adev(dev); 5600 int i; 5601 5602 DRM_INFO("PCI error: detected callback, state(%d)!!\n", state); 5603 5604 if (adev->gmc.xgmi.num_physical_nodes > 1) { 5605 DRM_WARN("No support for XGMI hive yet..."); 5606 return PCI_ERS_RESULT_DISCONNECT; 5607 } 5608 5609 adev->pci_channel_state = state; 5610 5611 switch (state) { 5612 case pci_channel_io_normal: 5613 return PCI_ERS_RESULT_CAN_RECOVER; 5614 /* Fatal error, prepare for slot reset */ 5615 case pci_channel_io_frozen: 5616 /* 5617 * Locking adev->reset_domain->sem will prevent any external access 5618 * to GPU during PCI error recovery 5619 */ 5620 amdgpu_device_lock_reset_domain(adev->reset_domain); 5621 amdgpu_device_set_mp1_state(adev); 5622 5623 /* 5624 * Block any work scheduling as we do for regular GPU reset 5625 * for the duration of the recovery 5626 */ 5627 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 5628 struct amdgpu_ring *ring = adev->rings[i]; 5629 5630 if (!ring || !ring->sched.thread) 5631 continue; 5632 5633 drm_sched_stop(&ring->sched, NULL); 5634 } 5635 atomic_inc(&adev->gpu_reset_counter); 5636 return PCI_ERS_RESULT_NEED_RESET; 5637 case pci_channel_io_perm_failure: 5638 /* Permanent error, prepare for device removal */ 5639 return PCI_ERS_RESULT_DISCONNECT; 5640 } 5641 5642 return PCI_ERS_RESULT_NEED_RESET; 5643 } 5644 5645 /** 5646 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers 5647 * @pdev: pointer to PCI device 5648 */ 5649 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev) 5650 { 5651 5652 DRM_INFO("PCI error: mmio enabled callback!!\n"); 5653 5654 /* TODO - dump whatever for debugging purposes */ 5655 5656 /* This called only if amdgpu_pci_error_detected returns 5657 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still 5658 * works, no need to reset slot. 5659 */ 5660 5661 return PCI_ERS_RESULT_RECOVERED; 5662 } 5663 5664 /** 5665 * amdgpu_pci_slot_reset - Called when PCI slot has been reset. 5666 * @pdev: PCI device struct 5667 * 5668 * Description: This routine is called by the pci error recovery 5669 * code after the PCI slot has been reset, just before we 5670 * should resume normal operations. 
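 * Restores the saved PCI config space, waits for the ASIC to respond
 * (config memsize != 0xffffffff) and then runs the regular pre-reset and
 * reset sequence with AMDGPU_SKIP_HW_RESET set so the ASIC itself is not
 * reset again.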

/**
 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev: PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 *
 * Return: PCI_ERS_RESULT_RECOVERED on success, PCI_ERS_RESULT_DISCONNECT otherwise.
 */
pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r, i;
	struct amdgpu_reset_context reset_context;
	u32 memsize;
	struct list_head device_list;

	DRM_INFO("PCI error: slot reset callback!!\n");

	memset(&reset_context, 0, sizeof(reset_context));

	INIT_LIST_HEAD(&device_list);
	list_add_tail(&adev->reset_list, &device_list);

	/* wait for asic to come out of reset */
	msleep(500);

	/* Restore PCI config space */
	amdgpu_device_load_pci_state(pdev);

	/* confirm ASIC came out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		memsize = amdgpu_asic_get_config_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}
	if (memsize == 0xffffffff) {
		r = -ETIME;
		goto out;
	}

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
	set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);

	adev->no_hw_access = true;
	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
	adev->no_hw_access = false;
	if (r)
		goto out;

	r = amdgpu_do_asic_reset(&device_list, &reset_context);

out:
	if (!r) {
		if (amdgpu_device_cache_pci_state(adev->pdev))
			pci_restore_state(adev->pdev);

		DRM_INFO("PCIe error recovery succeeded\n");
	} else {
		DRM_ERROR("PCIe error recovery failed, err:%d\n", r);
		amdgpu_device_unset_mp1_state(adev);
		amdgpu_device_unlock_reset_domain(adev->reset_domain);
	}

	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
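
/*
 * The PCI error callbacks in this file (including amdgpu_pci_resume() below)
 * are exposed to the PCI core through a struct pci_error_handlers. A minimal
 * sketch of that wiring (in amdgpu the actual table lives in amdgpu_drv.c and
 * is referenced from the driver's struct pci_driver .err_handler field):
 *
 *	static const struct pci_error_handlers amdgpu_pci_err_handler = {
 *		.error_detected	= amdgpu_pci_error_detected,
 *		.mmio_enabled	= amdgpu_pci_mmio_enabled,
 *		.slot_reset	= amdgpu_pci_slot_reset,
 *		.resume		= amdgpu_pci_resume,
 *	};
 */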

/**
 * amdgpu_pci_resume() - resume normal ops after PCI reset
 * @pdev: pointer to PCI device
 *
 * Called when the error recovery driver tells us that it's OK to
 * resume normal operation.
 */
void amdgpu_pci_resume(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	DRM_INFO("PCI error: resume callback!!\n");

	/* Only continue execution for the case of pci_channel_io_frozen */
	if (adev->pci_channel_state != pci_channel_io_frozen)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		drm_sched_resubmit_jobs(&ring->sched);
		drm_sched_start(&ring->sched, true);
	}

	amdgpu_device_unset_mp1_state(adev);
	amdgpu_device_unlock_reset_domain(adev->reset_domain);
}

bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	r = pci_save_state(pdev);
	if (!r) {
		kfree(adev->pci_state);

		adev->pci_state = pci_store_saved_state(pdev);

		if (!adev->pci_state) {
			DRM_ERROR("Failed to store PCI saved state\n");
			return false;
		}
	} else {
		DRM_WARN("Failed to save PCI state, err:%d\n", r);
		return false;
	}

	return true;
}

bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	if (!adev->pci_state)
		return false;

	r = pci_load_saved_state(pdev, adev->pci_state);

	if (!r) {
		pci_restore_state(pdev);
	} else {
		DRM_WARN("Failed to load PCI state, err:%d\n", r);
		return false;
	}

	return true;
}

void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
		struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
		return;
#endif
	if (adev->gmc.xgmi.connected_to_cpu)
		return;

	if (ring && ring->funcs->emit_hdp_flush)
		amdgpu_ring_emit_hdp_flush(ring);
	else
		amdgpu_asic_flush_hdp(adev, ring);
}

void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
		struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
		return;
#endif
	if (adev->gmc.xgmi.connected_to_cpu)
		return;

	amdgpu_asic_invalidate_hdp(adev, ring);
}
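
/*
 * Illustrative usage of the HDP helpers above (a sketch, not a call site in
 * this file): after the CPU writes data into VRAM through the BAR, the HDP
 * cache is flushed before the GPU consumes it, e.g. roughly:
 *
 *	memcpy_toio(vram_dst, src, size);
 *	amdgpu_device_flush_hdp(adev, NULL);
 *
 * and amdgpu_device_invalidate_hdp() is used on the read-back side. The
 * firmware-loading paths (PSP/UCODE) follow this pattern; vram_dst, src and
 * size here are placeholders.
 */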

int amdgpu_in_reset(struct amdgpu_device *adev)
{
	return atomic_read(&adev->reset_domain->in_gpu_reset);
}

/**
 * amdgpu_device_halt() - bring hardware to some kind of halt state
 *
 * @adev: amdgpu_device pointer
 *
 * Bring the hardware to some kind of halt state so that no one can touch it
 * any more. This helps to preserve the error context when an error occurs.
 * Compared to a simple hang, the system stays stable at least for SSH
 * access, so it should be trivial to inspect the hardware state and see
 * what's going on. Implemented as follows:
 *
 * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc),
 *    clears all CPU mappings to the device, and disallows remappings through page faults
 * 2. amdgpu_irq_disable_all() disables all interrupts
 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
 * 4. set adev->no_hw_access to avoid potential crashes after step 5
 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
 *    flush any in-flight DMA operations
 */
void amdgpu_device_halt(struct amdgpu_device *adev)
{
	struct pci_dev *pdev = adev->pdev;
	struct drm_device *ddev = adev_to_drm(adev);

	drm_dev_unplug(ddev);

	amdgpu_irq_disable_all(adev);

	amdgpu_fence_driver_hw_fini(adev);

	adev->no_hw_access = true;

	amdgpu_device_unmap_mmio(adev);

	pci_disable_device(pdev);
	pci_wait_for_pending_transaction(pdev);
}

u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
				 u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
				  u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
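
/*
 * Illustrative read-modify-write through the PCIe port accessors above
 * (reg_offset and SOME_FIELD_MASK are placeholders, not real defines):
 *
 *	u32 tmp = amdgpu_device_pcie_port_rreg(adev, reg_offset);
 *	tmp |= SOME_FIELD_MASK;
 *	amdgpu_device_pcie_port_wreg(adev, reg_offset, tmp);
 *
 * The index/data pair is protected by adev->pcie_idx_lock, so each helper
 * call is atomic with respect to other PCIe port register accesses.
 */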