1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/kthread.h>
29 #include <linux/console.h>
30 #include <linux/slab.h>
31 #include <drm/drmP.h>
32 #include <drm/drm_crtc_helper.h>
33 #include <drm/drm_atomic_helper.h>
34 #include <drm/amdgpu_drm.h>
35 #include <linux/vgaarb.h>
36 #include <linux/vga_switcheroo.h>
37 #include <linux/efi.h>
38 #include "amdgpu.h"
39 #include "amdgpu_trace.h"
40 #include "amdgpu_i2c.h"
41 #include "atom.h"
42 #include "amdgpu_atombios.h"
43 #include "amdgpu_atomfirmware.h"
44 #include "amd_pcie.h"
45 #ifdef CONFIG_DRM_AMDGPU_SI
46 #include "si.h"
47 #endif
48 #ifdef CONFIG_DRM_AMDGPU_CIK
49 #include "cik.h"
50 #endif
51 #include "vi.h"
52 #include "soc15.h"
53 #include "bif/bif_4_1_d.h"
54 #include <linux/pci.h>
55 #include <linux/firmware.h>
56 #include "amdgpu_vf_error.h"
57 
58 #include "amdgpu_amdkfd.h"
59 #include "amdgpu_pm.h"
60 
61 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
62 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
63 
64 #define AMDGPU_RESUME_MS		2000
65 
66 static const char *amdgpu_asic_name[] = {
67 	"TAHITI",
68 	"PITCAIRN",
69 	"VERDE",
70 	"OLAND",
71 	"HAINAN",
72 	"BONAIRE",
73 	"KAVERI",
74 	"KABINI",
75 	"HAWAII",
76 	"MULLINS",
77 	"TOPAZ",
78 	"TONGA",
79 	"FIJI",
80 	"CARRIZO",
81 	"STONEY",
82 	"POLARIS10",
83 	"POLARIS11",
84 	"POLARIS12",
85 	"VEGA10",
86 	"RAVEN",
87 	"LAST",
88 };
89 
90 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
91 
92 bool amdgpu_device_is_px(struct drm_device *dev)
93 {
94 	struct amdgpu_device *adev = dev->dev_private;
95 
96 	if (adev->flags & AMD_IS_PX)
97 		return true;
98 	return false;
99 }
100 
101 /*
102  * MMIO register access helper functions.
103  */
104 uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
105 			uint32_t acc_flags)
106 {
107 	uint32_t ret;
108 
109 	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
110 		return amdgpu_virt_kiq_rreg(adev, reg);
111 
112 	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
113 		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
114 	else {
115 		unsigned long flags;
116 
117 		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
118 		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
119 		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
120 		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
121 	}
122 	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
123 	return ret;
124 }
125 
126 /*
127  * MMIO register read with bytes helper functions
128  * @offset:bytes offset from MMIO start
129  *
130 */
131 
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (offset < adev->rmmio_size)
		return readb(adev->rmmio + offset);
	BUG();
}
137 
138 /*
139  * MMIO register write with bytes helper functions
140  * @offset:bytes offset from MMIO start
141  * @value: the value want to be written to the register
142  *
143 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
145 	if (offset < adev->rmmio_size)
146 		writeb(value, adev->rmmio + offset);
147 	else
148 		BUG();
149 }
150 
151 
152 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
153 		    uint32_t acc_flags)
154 {
155 	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
156 
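	/* On VEGA10 and later, register 0 is assumed here to be mmMM_INDEX;
	 * remember the last index written so the matching mmMM_DATA write
	 * (reg == 1) below can apply the 0x5702C delay workaround.
	 */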
157 	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
158 		adev->last_mm_index = v;
159 	}
160 
161 	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
162 		return amdgpu_virt_kiq_wreg(adev, reg, v);
163 
164 	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
165 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
166 	else {
167 		unsigned long flags;
168 
169 		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
170 		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
171 		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
172 		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
173 	}
174 
175 	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
176 		udelay(500);
177 	}
178 }
179 
180 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
181 {
182 	if ((reg * 4) < adev->rio_mem_size)
183 		return ioread32(adev->rio_mem + (reg * 4));
184 	else {
185 		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
186 		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
187 	}
188 }
189 
190 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
191 {
192 	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
193 		adev->last_mm_index = v;
194 	}
195 
196 	if ((reg * 4) < adev->rio_mem_size)
197 		iowrite32(v, adev->rio_mem + (reg * 4));
198 	else {
199 		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
200 		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
201 	}
202 
203 	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
204 		udelay(500);
205 	}
206 }
207 
208 /**
209  * amdgpu_mm_rdoorbell - read a doorbell dword
210  *
211  * @adev: amdgpu_device pointer
212  * @index: doorbell index
213  *
214  * Returns the value in the doorbell aperture at the
215  * requested doorbell index (CIK).
216  */
217 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
218 {
219 	if (index < adev->doorbell.num_doorbells) {
220 		return readl(adev->doorbell.ptr + index);
221 	} else {
222 		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
223 		return 0;
224 	}
225 }
226 
227 /**
228  * amdgpu_mm_wdoorbell - write a doorbell dword
229  *
230  * @adev: amdgpu_device pointer
231  * @index: doorbell index
232  * @v: value to write
233  *
234  * Writes @v to the doorbell aperture at the
235  * requested doorbell index (CIK).
236  */
237 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
238 {
239 	if (index < adev->doorbell.num_doorbells) {
240 		writel(v, adev->doorbell.ptr + index);
241 	} else {
242 		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
243 	}
244 }
245 
246 /**
247  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
248  *
249  * @adev: amdgpu_device pointer
250  * @index: doorbell index
251  *
252  * Returns the value in the doorbell aperture at the
253  * requested doorbell index (VEGA10+).
254  */
255 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
256 {
257 	if (index < adev->doorbell.num_doorbells) {
258 		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
259 	} else {
260 		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
261 		return 0;
262 	}
263 }
264 
265 /**
266  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
267  *
268  * @adev: amdgpu_device pointer
269  * @index: doorbell index
270  * @v: value to write
271  *
272  * Writes @v to the doorbell aperture at the
273  * requested doorbell index (VEGA10+).
274  */
275 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
276 {
277 	if (index < adev->doorbell.num_doorbells) {
278 		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
279 	} else {
280 		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
281 	}
282 }
283 
284 /**
285  * amdgpu_invalid_rreg - dummy reg read function
286  *
287  * @adev: amdgpu device pointer
288  * @reg: offset of register
289  *
290  * Dummy register read function.  Used for register blocks
291  * that certain asics don't have (all asics).
292  * Returns the value in the register.
293  */
294 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
295 {
296 	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
297 	BUG();
298 	return 0;
299 }
300 
301 /**
302  * amdgpu_invalid_wreg - dummy reg write function
303  *
304  * @adev: amdgpu device pointer
305  * @reg: offset of register
306  * @v: value to write to the register
307  *
 * Dummy register write function.  Used for register blocks
309  * that certain asics don't have (all asics).
310  */
311 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
312 {
313 	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
314 		  reg, v);
315 	BUG();
316 }
317 
318 /**
319  * amdgpu_block_invalid_rreg - dummy reg read function
320  *
321  * @adev: amdgpu device pointer
322  * @block: offset of instance
323  * @reg: offset of register
324  *
325  * Dummy register read function.  Used for register blocks
326  * that certain asics don't have (all asics).
327  * Returns the value in the register.
328  */
329 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
330 					  uint32_t block, uint32_t reg)
331 {
332 	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
333 		  reg, block);
334 	BUG();
335 	return 0;
336 }
337 
338 /**
339  * amdgpu_block_invalid_wreg - dummy reg write function
340  *
341  * @adev: amdgpu device pointer
342  * @block: offset of instance
343  * @reg: offset of register
344  * @v: value to write to the register
345  *
 * Dummy register write function.  Used for register blocks
347  * that certain asics don't have (all asics).
348  */
349 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
350 				      uint32_t block,
351 				      uint32_t reg, uint32_t v)
352 {
353 	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
354 		  reg, block, v);
355 	BUG();
356 }
357 
358 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
359 {
360 	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
361 				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
362 				       &adev->vram_scratch.robj,
363 				       &adev->vram_scratch.gpu_addr,
364 				       (void **)&adev->vram_scratch.ptr);
365 }
366 
367 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
368 {
369 	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
370 }
371 
372 /**
373  * amdgpu_device_program_register_sequence - program an array of registers.
374  *
375  * @adev: amdgpu_device pointer
376  * @registers: pointer to the register array
377  * @array_size: size of the register array
378  *
 * Programs an array of registers with AND and OR masks.
380  * This is a helper for setting golden registers.
381  */
382 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
383 					     const u32 *registers,
384 					     const u32 array_size)
385 {
386 	u32 tmp, reg, and_mask, or_mask;
387 	int i;
388 
389 	if (array_size % 3)
390 		return;
391 
	for (i = 0; i < array_size; i += 3) {
393 		reg = registers[i + 0];
394 		and_mask = registers[i + 1];
395 		or_mask = registers[i + 2];
396 
397 		if (and_mask == 0xffffffff) {
398 			tmp = or_mask;
399 		} else {
400 			tmp = RREG32(reg);
401 			tmp &= ~and_mask;
402 			tmp |= or_mask;
403 		}
404 		WREG32(reg, tmp);
405 	}
406 }
407 
408 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
409 {
410 	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
411 }
412 
413 /*
 * GPU doorbell aperture helper functions.
415  */
416 /**
417  * amdgpu_device_doorbell_init - Init doorbell driver information.
418  *
419  * @adev: amdgpu_device pointer
420  *
421  * Init doorbell driver information (CIK)
422  * Returns 0 on success, error on failure.
423  */
424 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
425 {
426 	/* No doorbell on SI hardware generation */
427 	if (adev->asic_type < CHIP_BONAIRE) {
428 		adev->doorbell.base = 0;
429 		adev->doorbell.size = 0;
430 		adev->doorbell.num_doorbells = 0;
431 		adev->doorbell.ptr = NULL;
432 		return 0;
433 	}
434 
435 	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
436 		return -EINVAL;
437 
438 	/* doorbell bar mapping */
439 	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
440 	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
441 
442 	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
443 					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
444 	if (adev->doorbell.num_doorbells == 0)
445 		return -EINVAL;
446 
447 	adev->doorbell.ptr = ioremap(adev->doorbell.base,
448 				     adev->doorbell.num_doorbells *
449 				     sizeof(u32));
450 	if (adev->doorbell.ptr == NULL)
451 		return -ENOMEM;
452 
453 	return 0;
454 }
455 
456 /**
457  * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
458  *
459  * @adev: amdgpu_device pointer
460  *
461  * Tear down doorbell driver information (CIK)
462  */
463 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
464 {
465 	iounmap(adev->doorbell.ptr);
466 	adev->doorbell.ptr = NULL;
467 }
468 
469 
470 
471 /*
472  * amdgpu_device_wb_*()
473  * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
475  */
476 
477 /**
478  * amdgpu_device_wb_fini - Disable Writeback and free memory
479  *
480  * @adev: amdgpu_device pointer
481  *
482  * Disables Writeback and frees the Writeback memory (all asics).
483  * Used at driver shutdown.
484  */
485 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
486 {
487 	if (adev->wb.wb_obj) {
488 		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
489 				      &adev->wb.gpu_addr,
490 				      (void **)&adev->wb.wb);
491 		adev->wb.wb_obj = NULL;
492 	}
493 }
494 
495 /**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
497  *
498  * @adev: amdgpu_device pointer
499  *
500  * Initializes writeback and allocates writeback memory (all asics).
501  * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
503  */
504 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
505 {
506 	int r;
507 
508 	if (adev->wb.wb_obj == NULL) {
509 		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
510 		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
511 					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
512 					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
513 					    (void **)&adev->wb.wb);
514 		if (r) {
515 			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
516 			return r;
517 		}
518 
519 		adev->wb.num_wb = AMDGPU_MAX_WB;
520 		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
521 
522 		/* clear wb memory */
523 		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
524 	}
525 
526 	return 0;
527 }
528 
529 /**
530  * amdgpu_device_wb_get - Allocate a wb entry
531  *
532  * @adev: amdgpu_device pointer
533  * @wb: wb index
534  *
535  * Allocate a wb slot for use by the driver (all asics).
536  * Returns 0 on success or -EINVAL on failure.
537  */
538 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
539 {
540 	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
541 
542 	if (offset < adev->wb.num_wb) {
543 		__set_bit(offset, adev->wb.used);
544 		*wb = offset << 3; /* convert to dw offset */
545 		return 0;
546 	} else {
547 		return -EINVAL;
548 	}
549 }
550 
551 /**
552  * amdgpu_device_wb_free - Free a wb entry
553  *
554  * @adev: amdgpu_device pointer
555  * @wb: wb index
556  *
557  * Free a wb slot allocated for use by the driver (all asics)
558  */
559 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
560 {
561 	wb >>= 3;
562 	if (wb < adev->wb.num_wb)
563 		__clear_bit(wb, adev->wb.used);
564 }
565 
566 /**
567  * amdgpu_device_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
570  * @base: base address at which to put VRAM
571  *
572  * Function will try to place VRAM at base address provided
573  * as parameter.
574  */
575 void amdgpu_device_vram_location(struct amdgpu_device *adev,
576 				 struct amdgpu_gmc *mc, u64 base)
577 {
578 	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
579 
580 	mc->vram_start = base;
581 	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
582 	if (limit && limit < mc->real_vram_size)
583 		mc->real_vram_size = limit;
584 	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
585 			mc->mc_vram_size >> 20, mc->vram_start,
586 			mc->vram_end, mc->real_vram_size >> 20);
587 }
588 
589 /**
590  * amdgpu_device_gart_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left, we adjust the GTT size.
 * Thus this function never fails.
 *
 * FIXME: when reducing GTT size, align the new size to a power of 2.
600  */
601 void amdgpu_device_gart_location(struct amdgpu_device *adev,
602 				 struct amdgpu_gmc *mc)
603 {
604 	u64 size_af, size_bf;
605 
606 	size_af = adev->gmc.mc_mask - mc->vram_end;
607 	size_bf = mc->vram_start;
608 	if (size_bf > size_af) {
609 		if (mc->gart_size > size_bf) {
610 			dev_warn(adev->dev, "limiting GTT\n");
611 			mc->gart_size = size_bf;
612 		}
613 		mc->gart_start = 0;
614 	} else {
615 		if (mc->gart_size > size_af) {
616 			dev_warn(adev->dev, "limiting GTT\n");
617 			mc->gart_size = size_af;
618 		}
619 		/* VCE doesn't like it when BOs cross a 4GB segment, so align
620 		 * the GART base on a 4GB boundary as well.
621 		 */
622 		mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
623 	}
624 	mc->gart_end = mc->gart_start + mc->gart_size - 1;
625 	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
626 			mc->gart_size >> 20, mc->gart_start, mc->gart_end);
627 }
628 
629 /**
630  * amdgpu_device_resize_fb_bar - try to resize FB BAR
631  *
632  * @adev: amdgpu_device pointer
633  *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
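 *
 * Worked example (an assumption-laden sketch for an 8 GiB VRAM part):
 * space_needed rounds up to 2^33, (space_needed >> 20) | 1 = 8193,
 * order_base_2(8193) = 14, so rbar_size = 13, i.e. a 2^13 MB = 8 GiB
 * resizable BAR is requested.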
637  */
638 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
639 {
640 	u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
641 	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
642 	struct pci_bus *root;
643 	struct resource *res;
644 	unsigned i;
645 	u16 cmd;
646 	int r;
647 
648 	/* Bypass for VF */
649 	if (amdgpu_sriov_vf(adev))
650 		return 0;
651 
652 	/* Check if the root BUS has 64bit memory resources */
653 	root = adev->pdev->bus;
654 	while (root->parent)
655 		root = root->parent;
656 
657 	pci_bus_for_each_resource(root, res, i) {
658 		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
659 		    res->start > 0x100000000ull)
660 			break;
661 	}
662 
663 	/* Trying to resize is pointless without a root hub window above 4GB */
664 	if (!res)
665 		return 0;
666 
667 	/* Disable memory decoding while we change the BAR addresses and size */
668 	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
669 	pci_write_config_word(adev->pdev, PCI_COMMAND,
670 			      cmd & ~PCI_COMMAND_MEMORY);
671 
672 	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
673 	amdgpu_device_doorbell_fini(adev);
674 	if (adev->asic_type >= CHIP_BONAIRE)
675 		pci_release_resource(adev->pdev, 2);
676 
677 	pci_release_resource(adev->pdev, 0);
678 
679 	r = pci_resize_resource(adev->pdev, 0, rbar_size);
680 	if (r == -ENOSPC)
681 		DRM_INFO("Not enough PCI address space for a large BAR.");
682 	else if (r && r != -ENOTSUPP)
683 		DRM_ERROR("Problem resizing BAR0 (%d).", r);
684 
685 	pci_assign_unassigned_bus_resources(adev->pdev->bus);
686 
687 	/* When the doorbell or fb BAR isn't available we have no chance of
688 	 * using the device.
689 	 */
690 	r = amdgpu_device_doorbell_init(adev);
691 	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
692 		return -ENODEV;
693 
694 	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
695 
696 	return 0;
697 }
698 
699 /*
 * GPU helper functions.
701  */
702 /**
 * amdgpu_device_need_post - check if the hw needs to be posted
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup,
 * or if a post is needed after a hw reset.
 * Returns true if post is needed, false if not.
710  */
711 bool amdgpu_device_need_post(struct amdgpu_device *adev)
712 {
713 	uint32_t reg;
714 
715 	if (amdgpu_sriov_vf(adev))
716 		return false;
717 
718 	if (amdgpu_passthrough(adev)) {
		/* For FIJI: in the whole-GPU pass-through virtualization case, after
		 * a VM reboot some old SMC firmware still needs the driver to do a
		 * vPost, otherwise the GPU hangs; SMC firmware versions above 22.15
		 * don't have this flaw, so we force a vPost for versions below 22.15.
		 */
724 		if (adev->asic_type == CHIP_FIJI) {
725 			int err;
726 			uint32_t fw_ver;
727 			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
729 			if (err)
730 				return true;
731 
732 			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
733 			if (fw_ver < 0x00160e00)
734 				return true;
735 		}
736 	}
737 
738 	if (adev->has_hw_reset) {
739 		adev->has_hw_reset = false;
740 		return true;
741 	}
742 
743 	/* bios scratch used on CIK+ */
744 	if (adev->asic_type >= CHIP_BONAIRE)
745 		return amdgpu_atombios_scratch_need_asic_init(adev);
746 
747 	/* check MEM_SIZE for older asics */
748 	reg = amdgpu_asic_get_config_memsize(adev);
749 
750 	if ((reg != 0) && (reg != 0xffffffff))
751 		return false;
752 
753 	return true;
754 }
755 
756 /* if we get transitioned to only one device, take VGA back */
757 /**
758  * amdgpu_device_vga_set_decode - enable/disable vga decode
759  *
760  * @cookie: amdgpu_device pointer
761  * @state: enable/disable vga decode
762  *
763  * Enable/disable vga decode (all asics).
764  * Returns VGA resource flags.
765  */
766 static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
767 {
768 	struct amdgpu_device *adev = cookie;
769 	amdgpu_asic_set_vga_state(adev, state);
770 	if (state)
771 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
772 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
773 	else
774 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
775 }
776 
777 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
778 {
779 	/* defines number of bits in page table versus page directory,
780 	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
781 	 * page table and the remaining bits are in the page directory */
782 	if (amdgpu_vm_block_size == -1)
783 		return;
784 
785 	if (amdgpu_vm_block_size < 9) {
786 		dev_warn(adev->dev, "VM page table size (%d) too small\n",
787 			 amdgpu_vm_block_size);
788 		amdgpu_vm_block_size = -1;
789 	}
790 }
791 
792 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
793 {
794 	/* no need to check the default value */
795 	if (amdgpu_vm_size == -1)
796 		return;
797 
798 	if (amdgpu_vm_size < 1) {
799 		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
800 			 amdgpu_vm_size);
801 		amdgpu_vm_size = -1;
802 	}
803 }
804 
805 /**
806  * amdgpu_device_check_arguments - validate module params
807  *
808  * @adev: amdgpu_device pointer
809  *
810  * Validates certain module parameters and updates
811  * the associated values used by the driver (all asics).
812  */
813 static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
814 {
815 	if (amdgpu_sched_jobs < 4) {
816 		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
817 			 amdgpu_sched_jobs);
818 		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
820 		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
821 			 amdgpu_sched_jobs);
822 		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
823 	}
824 
825 	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater than or equal to 32M */
827 		dev_warn(adev->dev, "gart size (%d) too small\n",
828 			 amdgpu_gart_size);
829 		amdgpu_gart_size = -1;
830 	}
831 
832 	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater than or equal to 32M */
834 		dev_warn(adev->dev, "gtt size (%d) too small\n",
835 				 amdgpu_gtt_size);
836 		amdgpu_gtt_size = -1;
837 	}
838 
839 	/* valid range is between 4 and 9 inclusive */
840 	if (amdgpu_vm_fragment_size != -1 &&
841 	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
842 		dev_warn(adev->dev, "valid range is between 4 and 9\n");
843 		amdgpu_vm_fragment_size = -1;
844 	}
845 
846 	amdgpu_device_check_vm_size(adev);
847 
848 	amdgpu_device_check_block_size(adev);
849 
850 	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
851 	    !is_power_of_2(amdgpu_vram_page_split))) {
852 		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
853 			 amdgpu_vram_page_split);
854 		amdgpu_vram_page_split = 1024;
855 	}
856 
857 	if (amdgpu_lockup_timeout == 0) {
858 		dev_warn(adev->dev, "lockup_timeout msut be > 0, adjusting to 10000\n");
859 		amdgpu_lockup_timeout = 10000;
860 	}
861 
862 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
863 }
864 
865 /**
866  * amdgpu_switcheroo_set_state - set switcheroo state
867  *
868  * @pdev: pci dev pointer
869  * @state: vga_switcheroo state
870  *
 * Callback for the switcheroo driver.  Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
873  */
874 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
875 {
876 	struct drm_device *dev = pci_get_drvdata(pdev);
877 
878 	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
879 		return;
880 
881 	if (state == VGA_SWITCHEROO_ON) {
882 		pr_info("amdgpu: switched on\n");
883 		/* don't suspend or resume card normally */
884 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
885 
886 		amdgpu_device_resume(dev, true, true);
887 
888 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
889 		drm_kms_helper_poll_enable(dev);
890 	} else {
891 		pr_info("amdgpu: switched off\n");
892 		drm_kms_helper_poll_disable(dev);
893 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
894 		amdgpu_device_suspend(dev, true, true);
895 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
896 	}
897 }
898 
899 /**
900  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
901  *
902  * @pdev: pci dev pointer
903  *
 * Callback for the switcheroo driver.  Check if the switcheroo
 * state can be changed.
906  * Returns true if the state can be changed, false if not.
907  */
908 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
909 {
910 	struct drm_device *dev = pci_get_drvdata(pdev);
911 
912 	/*
913 	* FIXME: open_count is protected by drm_global_mutex but that would lead to
914 	* locking inversion with the driver load path. And the access here is
915 	* completely racy anyway. So don't bother with locking for now.
916 	*/
917 	return dev->open_count == 0;
918 }
919 
920 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
921 	.set_gpu_state = amdgpu_switcheroo_set_state,
922 	.reprobe = NULL,
923 	.can_switch = amdgpu_switcheroo_can_switch,
924 };
925 
926 int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
927 					   enum amd_ip_block_type block_type,
928 					   enum amd_clockgating_state state)
929 {
930 	int i, r = 0;
931 
932 	for (i = 0; i < adev->num_ip_blocks; i++) {
933 		if (!adev->ip_blocks[i].status.valid)
934 			continue;
935 		if (adev->ip_blocks[i].version->type != block_type)
936 			continue;
937 		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
938 			continue;
939 		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
940 			(void *)adev, state);
941 		if (r)
942 			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
943 				  adev->ip_blocks[i].version->funcs->name, r);
944 	}
945 	return r;
946 }
947 
948 int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
949 					   enum amd_ip_block_type block_type,
950 					   enum amd_powergating_state state)
951 {
952 	int i, r = 0;
953 
954 	for (i = 0; i < adev->num_ip_blocks; i++) {
955 		if (!adev->ip_blocks[i].status.valid)
956 			continue;
957 		if (adev->ip_blocks[i].version->type != block_type)
958 			continue;
959 		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
960 			continue;
961 		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
962 			(void *)adev, state);
963 		if (r)
964 			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
965 				  adev->ip_blocks[i].version->funcs->name, r);
966 	}
967 	return r;
968 }
969 
970 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
971 					    u32 *flags)
972 {
973 	int i;
974 
975 	for (i = 0; i < adev->num_ip_blocks; i++) {
976 		if (!adev->ip_blocks[i].status.valid)
977 			continue;
978 		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
979 			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
980 	}
981 }
982 
983 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
984 				   enum amd_ip_block_type block_type)
985 {
986 	int i, r;
987 
988 	for (i = 0; i < adev->num_ip_blocks; i++) {
989 		if (!adev->ip_blocks[i].status.valid)
990 			continue;
991 		if (adev->ip_blocks[i].version->type == block_type) {
992 			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
993 			if (r)
994 				return r;
995 			break;
996 		}
997 	}
	return 0;
}
1001 
1002 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1003 			      enum amd_ip_block_type block_type)
1004 {
1005 	int i;
1006 
1007 	for (i = 0; i < adev->num_ip_blocks; i++) {
1008 		if (!adev->ip_blocks[i].status.valid)
1009 			continue;
1010 		if (adev->ip_blocks[i].version->type == block_type)
1011 			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1012 	}
	return true;
}
1016 
1017 struct amdgpu_ip_block *
1018 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1019 			      enum amd_ip_block_type type)
1020 {
1021 	int i;
1022 
1023 	for (i = 0; i < adev->num_ip_blocks; i++)
1024 		if (adev->ip_blocks[i].version->type == type)
1025 			return &adev->ip_blocks[i];
1026 
1027 	return NULL;
1028 }
1029 
1030 /**
1031  * amdgpu_device_ip_block_version_cmp
1032  *
1033  * @adev: amdgpu_device pointer
1034  * @type: enum amd_ip_block_type
1035  * @major: major version
1036  * @minor: minor version
1037  *
 * Returns 0 if the IP block version is equal or greater,
 * 1 if it is smaller or the ip_block doesn't exist.
1040  */
1041 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1042 				       enum amd_ip_block_type type,
1043 				       u32 major, u32 minor)
1044 {
1045 	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1046 
1047 	if (ip_block && ((ip_block->version->major > major) ||
1048 			((ip_block->version->major == major) &&
1049 			(ip_block->version->minor >= minor))))
1050 		return 0;
1051 
1052 	return 1;
1053 }
1054 
1055 /**
1056  * amdgpu_device_ip_block_add
1057  *
1058  * @adev: amdgpu_device pointer
1059  * @ip_block_version: pointer to the IP to add
1060  *
1061  * Adds the IP block driver information to the collection of IPs
1062  * on the asic.
1063  */
1064 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1065 			       const struct amdgpu_ip_block_version *ip_block_version)
1066 {
1067 	if (!ip_block_version)
1068 		return -EINVAL;
1069 
1070 	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1071 		  ip_block_version->funcs->name);
1072 
1073 	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1074 
1075 	return 0;
1076 }
1077 
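/*
 * amdgpu_virtual_display is assumed here to be a semicolon-separated list
 * of "<pci address>[,<num_crtc>]" entries, with "all" matching every
 * device. For example (hypothetical address):
 *
 *	amdgpu.virtual_display=0000:01:00.0,2;all
 *
 * would enable a two-crtc virtual display on 0000:01:00.0 and a single
 * crtc on every other device.
 */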
1078 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1079 {
1080 	adev->enable_virtual_display = false;
1081 
1082 	if (amdgpu_virtual_display) {
1083 		struct drm_device *ddev = adev->ddev;
1084 		const char *pci_address_name = pci_name(ddev->pdev);
1085 		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1086 
1087 		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1088 		pciaddstr_tmp = pciaddstr;
1089 		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1090 			pciaddname = strsep(&pciaddname_tmp, ",");
1091 			if (!strcmp("all", pciaddname)
1092 			    || !strcmp(pci_address_name, pciaddname)) {
1093 				long num_crtc;
1094 				int res = -1;
1095 
1096 				adev->enable_virtual_display = true;
1097 
1098 				if (pciaddname_tmp)
1099 					res = kstrtol(pciaddname_tmp, 10,
1100 						      &num_crtc);
1101 
1102 				if (!res) {
1103 					if (num_crtc < 1)
1104 						num_crtc = 1;
1105 					if (num_crtc > 6)
1106 						num_crtc = 6;
1107 					adev->mode_info.num_crtc = num_crtc;
1108 				} else {
1109 					adev->mode_info.num_crtc = 1;
1110 				}
1111 				break;
1112 			}
1113 		}
1114 
1115 		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1116 			 amdgpu_virtual_display, pci_address_name,
1117 			 adev->enable_virtual_display, adev->mode_info.num_crtc);
1118 
1119 		kfree(pciaddstr);
1120 	}
1121 }
1122 
1123 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1124 {
1125 	const char *chip_name;
1126 	char fw_name[30];
1127 	int err;
1128 	const struct gpu_info_firmware_header_v1_0 *hdr;
1129 
1130 	adev->firmware.gpu_info_fw = NULL;
1131 
1132 	switch (adev->asic_type) {
1133 	case CHIP_TOPAZ:
1134 	case CHIP_TONGA:
1135 	case CHIP_FIJI:
1136 	case CHIP_POLARIS11:
1137 	case CHIP_POLARIS10:
1138 	case CHIP_POLARIS12:
1139 	case CHIP_CARRIZO:
1140 	case CHIP_STONEY:
1141 #ifdef CONFIG_DRM_AMDGPU_SI
1142 	case CHIP_VERDE:
1143 	case CHIP_TAHITI:
1144 	case CHIP_PITCAIRN:
1145 	case CHIP_OLAND:
1146 	case CHIP_HAINAN:
1147 #endif
1148 #ifdef CONFIG_DRM_AMDGPU_CIK
1149 	case CHIP_BONAIRE:
1150 	case CHIP_HAWAII:
1151 	case CHIP_KAVERI:
1152 	case CHIP_KABINI:
1153 	case CHIP_MULLINS:
1154 #endif
1155 	default:
1156 		return 0;
1157 	case CHIP_VEGA10:
1158 		chip_name = "vega10";
1159 		break;
1160 	case CHIP_RAVEN:
1161 		chip_name = "raven";
1162 		break;
1163 	}
1164 
1165 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1166 	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1167 	if (err) {
1168 		dev_err(adev->dev,
1169 			"Failed to load gpu_info firmware \"%s\"\n",
1170 			fw_name);
1171 		goto out;
1172 	}
1173 	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1174 	if (err) {
1175 		dev_err(adev->dev,
1176 			"Failed to validate gpu_info firmware \"%s\"\n",
1177 			fw_name);
1178 		goto out;
1179 	}
1180 
1181 	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1182 	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1183 
1184 	switch (hdr->version_major) {
1185 	case 1:
1186 	{
1187 		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1188 			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1189 								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1190 
1191 		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1192 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1193 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1194 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1195 		adev->gfx.config.max_texture_channel_caches =
1196 			le32_to_cpu(gpu_info_fw->gc_num_tccs);
1197 		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1198 		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1199 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1200 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1201 		adev->gfx.config.double_offchip_lds_buf =
1202 			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1203 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1204 		adev->gfx.cu_info.max_waves_per_simd =
1205 			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1206 		adev->gfx.cu_info.max_scratch_slots_per_cu =
1207 			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1208 		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1209 		break;
1210 	}
1211 	default:
1212 		dev_err(adev->dev,
1213 			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1214 		err = -EINVAL;
1215 		goto out;
1216 	}
1217 out:
1218 	return err;
1219 }
1220 
1221 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
1222 {
1223 	int i, r;
1224 
1225 	amdgpu_device_enable_virtual_display(adev);
1226 
1227 	switch (adev->asic_type) {
1228 	case CHIP_TOPAZ:
1229 	case CHIP_TONGA:
1230 	case CHIP_FIJI:
1231 	case CHIP_POLARIS11:
1232 	case CHIP_POLARIS10:
1233 	case CHIP_POLARIS12:
1234 	case CHIP_CARRIZO:
1235 	case CHIP_STONEY:
1236 		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
1237 			adev->family = AMDGPU_FAMILY_CZ;
1238 		else
1239 			adev->family = AMDGPU_FAMILY_VI;
1240 
1241 		r = vi_set_ip_blocks(adev);
1242 		if (r)
1243 			return r;
1244 		break;
1245 #ifdef CONFIG_DRM_AMDGPU_SI
1246 	case CHIP_VERDE:
1247 	case CHIP_TAHITI:
1248 	case CHIP_PITCAIRN:
1249 	case CHIP_OLAND:
1250 	case CHIP_HAINAN:
1251 		adev->family = AMDGPU_FAMILY_SI;
1252 		r = si_set_ip_blocks(adev);
1253 		if (r)
1254 			return r;
1255 		break;
1256 #endif
1257 #ifdef CONFIG_DRM_AMDGPU_CIK
1258 	case CHIP_BONAIRE:
1259 	case CHIP_HAWAII:
1260 	case CHIP_KAVERI:
1261 	case CHIP_KABINI:
1262 	case CHIP_MULLINS:
1263 		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1264 			adev->family = AMDGPU_FAMILY_CI;
1265 		else
1266 			adev->family = AMDGPU_FAMILY_KV;
1267 
1268 		r = cik_set_ip_blocks(adev);
1269 		if (r)
1270 			return r;
1271 		break;
1272 #endif
1273 	case  CHIP_VEGA10:
1274 	case  CHIP_RAVEN:
1275 		if (adev->asic_type == CHIP_RAVEN)
1276 			adev->family = AMDGPU_FAMILY_RV;
1277 		else
1278 			adev->family = AMDGPU_FAMILY_AI;
1279 
1280 		r = soc15_set_ip_blocks(adev);
1281 		if (r)
1282 			return r;
1283 		break;
1284 	default:
1285 		/* FIXME: not supported yet */
1286 		return -EINVAL;
1287 	}
1288 
1289 	r = amdgpu_device_parse_gpu_info_fw(adev);
1290 	if (r)
1291 		return r;
1292 
1293 	amdgpu_amdkfd_device_probe(adev);
1294 
1295 	if (amdgpu_sriov_vf(adev)) {
1296 		r = amdgpu_virt_request_full_gpu(adev, true);
1297 		if (r)
1298 			return -EAGAIN;
1299 	}
1300 
1301 	for (i = 0; i < adev->num_ip_blocks; i++) {
1302 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1303 			DRM_ERROR("disabled ip block: %d <%s>\n",
1304 				  i, adev->ip_blocks[i].version->funcs->name);
1305 			adev->ip_blocks[i].status.valid = false;
1306 		} else {
1307 			if (adev->ip_blocks[i].version->funcs->early_init) {
1308 				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
1309 				if (r == -ENOENT) {
1310 					adev->ip_blocks[i].status.valid = false;
1311 				} else if (r) {
1312 					DRM_ERROR("early_init of IP block <%s> failed %d\n",
1313 						  adev->ip_blocks[i].version->funcs->name, r);
1314 					return r;
1315 				} else {
1316 					adev->ip_blocks[i].status.valid = true;
1317 				}
1318 			} else {
1319 				adev->ip_blocks[i].status.valid = true;
1320 			}
1321 		}
1322 	}
1323 
1324 	adev->cg_flags &= amdgpu_cg_mask;
1325 	adev->pg_flags &= amdgpu_pg_mask;
1326 
1327 	return 0;
1328 }
1329 
1330 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
1331 {
1332 	int i, r;
1333 
1334 	for (i = 0; i < adev->num_ip_blocks; i++) {
1335 		if (!adev->ip_blocks[i].status.valid)
1336 			continue;
1337 		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
1338 		if (r) {
1339 			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1340 				  adev->ip_blocks[i].version->funcs->name, r);
1341 			return r;
1342 		}
1343 		adev->ip_blocks[i].status.sw = true;
1344 
1345 		/* need to do gmc hw init early so we can allocate gpu mem */
1346 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1347 			r = amdgpu_device_vram_scratch_init(adev);
1348 			if (r) {
1349 				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
1350 				return r;
1351 			}
1352 			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1353 			if (r) {
1354 				DRM_ERROR("hw_init %d failed %d\n", i, r);
1355 				return r;
1356 			}
1357 			r = amdgpu_device_wb_init(adev);
1358 			if (r) {
1359 				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
1360 				return r;
1361 			}
1362 			adev->ip_blocks[i].status.hw = true;
1363 
1364 			/* right after GMC hw init, we create CSA */
1365 			if (amdgpu_sriov_vf(adev)) {
1366 				r = amdgpu_allocate_static_csa(adev);
1367 				if (r) {
1368 					DRM_ERROR("allocate CSA failed %d\n", r);
1369 					return r;
1370 				}
1371 			}
1372 		}
1373 	}
1374 
1375 	for (i = 0; i < adev->num_ip_blocks; i++) {
1376 		if (!adev->ip_blocks[i].status.sw)
1377 			continue;
1378 		if (adev->ip_blocks[i].status.hw)
1379 			continue;
1380 		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1381 		if (r) {
1382 			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1383 				  adev->ip_blocks[i].version->funcs->name, r);
1384 			return r;
1385 		}
1386 		adev->ip_blocks[i].status.hw = true;
1387 	}
1388 
1389 	amdgpu_amdkfd_device_init(adev);
1390 
1391 	if (amdgpu_sriov_vf(adev))
1392 		amdgpu_virt_release_full_gpu(adev, true);
1393 
1394 	return 0;
1395 }
1396 
1397 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
1398 {
1399 	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1400 }
1401 
1402 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
1403 {
1404 	return !!memcmp(adev->gart.ptr, adev->reset_magic,
1405 			AMDGPU_RESET_MAGIC_NUM);
1406 }
1407 
1408 static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
1409 {
1410 	int i = 0, r;
1411 
1412 	if (amdgpu_emu_mode == 1)
1413 		return 0;
1414 
1415 	for (i = 0; i < adev->num_ip_blocks; i++) {
1416 		if (!adev->ip_blocks[i].status.valid)
1417 			continue;
1418 		/* skip CG for VCE/UVD, it's handled specially */
1419 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1420 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1421 		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1422 			/* enable clockgating to save power */
1423 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1424 										     AMD_CG_STATE_GATE);
1425 			if (r) {
1426 				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
1427 					  adev->ip_blocks[i].version->funcs->name, r);
1428 				return r;
1429 			}
1430 		}
1431 	}
1432 	return 0;
1433 }
1434 
1435 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
1436 {
1437 	int i = 0, r;
1438 
1439 	for (i = 0; i < adev->num_ip_blocks; i++) {
1440 		if (!adev->ip_blocks[i].status.valid)
1441 			continue;
1442 		if (adev->ip_blocks[i].version->funcs->late_init) {
1443 			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
1444 			if (r) {
1445 				DRM_ERROR("late_init of IP block <%s> failed %d\n",
1446 					  adev->ip_blocks[i].version->funcs->name, r);
1447 				return r;
1448 			}
1449 			adev->ip_blocks[i].status.late_initialized = true;
1450 		}
1451 	}
1452 
1453 	mod_delayed_work(system_wq, &adev->late_init_work,
1454 			msecs_to_jiffies(AMDGPU_RESUME_MS));
1455 
1456 	amdgpu_device_fill_reset_magic(adev);
1457 
1458 	return 0;
1459 }
1460 
1461 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
1462 {
1463 	int i, r;
1464 
1465 	amdgpu_amdkfd_device_fini(adev);
1466 	/* need to disable SMC first */
1467 	for (i = 0; i < adev->num_ip_blocks; i++) {
1468 		if (!adev->ip_blocks[i].status.hw)
1469 			continue;
1470 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC &&
1471 			adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1472 			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
1473 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1474 										     AMD_CG_STATE_UNGATE);
1475 			if (r) {
1476 				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1477 					  adev->ip_blocks[i].version->funcs->name, r);
1478 				return r;
1479 			}
1480 			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
1481 			/* XXX handle errors */
1482 			if (r) {
1483 				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1484 					  adev->ip_blocks[i].version->funcs->name, r);
1485 			}
1486 			adev->ip_blocks[i].status.hw = false;
1487 			break;
1488 		}
1489 	}
1490 
1491 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1492 		if (!adev->ip_blocks[i].status.hw)
1493 			continue;
1494 
1495 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1496 			adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1497 			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
1498 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1499 										     AMD_CG_STATE_UNGATE);
1500 			if (r) {
1501 				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1502 					  adev->ip_blocks[i].version->funcs->name, r);
1503 				return r;
1504 			}
1505 		}
1506 
1507 		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
1508 		/* XXX handle errors */
1509 		if (r) {
1510 			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1511 				  adev->ip_blocks[i].version->funcs->name, r);
1512 		}
1513 
1514 		adev->ip_blocks[i].status.hw = false;
1515 	}
1516 
1517 	/* disable all interrupts */
1518 	amdgpu_irq_disable_all(adev);
1519 
1520 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1521 		if (!adev->ip_blocks[i].status.sw)
1522 			continue;
1523 
1524 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1525 			amdgpu_free_static_csa(adev);
1526 			amdgpu_device_wb_fini(adev);
1527 			amdgpu_device_vram_scratch_fini(adev);
1528 		}
1529 
1530 		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
1531 		/* XXX handle errors */
1532 		if (r) {
1533 			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1534 				  adev->ip_blocks[i].version->funcs->name, r);
1535 		}
1536 		adev->ip_blocks[i].status.sw = false;
1537 		adev->ip_blocks[i].status.valid = false;
1538 	}
1539 
1540 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1541 		if (!adev->ip_blocks[i].status.late_initialized)
1542 			continue;
1543 		if (adev->ip_blocks[i].version->funcs->late_fini)
1544 			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1545 		adev->ip_blocks[i].status.late_initialized = false;
1546 	}
1547 
1548 	if (amdgpu_sriov_vf(adev))
1549 		if (amdgpu_virt_release_full_gpu(adev, false))
1550 			DRM_ERROR("failed to release exclusive mode on fini\n");
1551 
1552 	return 0;
1553 }
1554 
1555 static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
1556 {
1557 	struct amdgpu_device *adev =
1558 		container_of(work, struct amdgpu_device, late_init_work.work);
1559 	amdgpu_device_ip_late_set_cg_state(adev);
1560 }
1561 
1562 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
1563 {
1564 	int i, r;
1565 
1566 	if (amdgpu_sriov_vf(adev))
1567 		amdgpu_virt_request_full_gpu(adev, false);
1568 
1569 	/* ungate SMC block first */
1570 	r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1571 						   AMD_CG_STATE_UNGATE);
1572 	if (r) {
1573 		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
1574 	}
1575 
1576 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1577 		if (!adev->ip_blocks[i].status.valid)
1578 			continue;
1579 		/* ungate blocks so that suspend can properly shut them down */
1580 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
1581 			adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1582 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1583 										     AMD_CG_STATE_UNGATE);
1584 			if (r) {
1585 				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1586 					  adev->ip_blocks[i].version->funcs->name, r);
1587 			}
1588 		}
1589 		/* XXX handle errors */
1590 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
1591 		/* XXX handle errors */
1592 		if (r) {
1593 			DRM_ERROR("suspend of IP block <%s> failed %d\n",
1594 				  adev->ip_blocks[i].version->funcs->name, r);
1595 		}
1596 	}
1597 
1598 	if (amdgpu_sriov_vf(adev))
1599 		amdgpu_virt_release_full_gpu(adev, false);
1600 
1601 	return 0;
1602 }
1603 
1604 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
1605 {
1606 	int i, r;
1607 
1608 	static enum amd_ip_block_type ip_order[] = {
1609 		AMD_IP_BLOCK_TYPE_GMC,
1610 		AMD_IP_BLOCK_TYPE_COMMON,
1611 		AMD_IP_BLOCK_TYPE_IH,
1612 	};
1613 
1614 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1615 		int j;
1616 		struct amdgpu_ip_block *block;
1617 
1618 		for (j = 0; j < adev->num_ip_blocks; j++) {
1619 			block = &adev->ip_blocks[j];
1620 
1621 			if (block->version->type != ip_order[i] ||
1622 				!block->status.valid)
1623 				continue;
1624 
1625 			r = block->version->funcs->hw_init(adev);
1626 			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
1627 			if (r)
1628 				return r;
1629 		}
1630 	}
1631 
1632 	return 0;
1633 }
1634 
1635 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
1636 {
1637 	int i, r;
1638 
1639 	static enum amd_ip_block_type ip_order[] = {
1640 		AMD_IP_BLOCK_TYPE_SMC,
1641 		AMD_IP_BLOCK_TYPE_PSP,
1642 		AMD_IP_BLOCK_TYPE_DCE,
1643 		AMD_IP_BLOCK_TYPE_GFX,
1644 		AMD_IP_BLOCK_TYPE_SDMA,
1645 		AMD_IP_BLOCK_TYPE_UVD,
1646 		AMD_IP_BLOCK_TYPE_VCE
1647 	};
1648 
1649 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1650 		int j;
1651 		struct amdgpu_ip_block *block;
1652 
1653 		for (j = 0; j < adev->num_ip_blocks; j++) {
1654 			block = &adev->ip_blocks[j];
1655 
1656 			if (block->version->type != ip_order[i] ||
1657 				!block->status.valid)
1658 				continue;
1659 
1660 			r = block->version->funcs->hw_init(adev);
1661 			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
1662 			if (r)
1663 				return r;
1664 		}
1665 	}
1666 
1667 	return 0;
1668 }
1669 
1670 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
1671 {
1672 	int i, r;
1673 
1674 	for (i = 0; i < adev->num_ip_blocks; i++) {
1675 		if (!adev->ip_blocks[i].status.valid)
1676 			continue;
1677 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1678 				adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1679 				adev->ip_blocks[i].version->type ==
1680 				AMD_IP_BLOCK_TYPE_IH) {
1681 			r = adev->ip_blocks[i].version->funcs->resume(adev);
1682 			if (r) {
1683 				DRM_ERROR("resume of IP block <%s> failed %d\n",
1684 					  adev->ip_blocks[i].version->funcs->name, r);
1685 				return r;
1686 			}
1687 		}
1688 	}
1689 
1690 	return 0;
1691 }
1692 
1693 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
1694 {
1695 	int i, r;
1696 
1697 	for (i = 0; i < adev->num_ip_blocks; i++) {
1698 		if (!adev->ip_blocks[i].status.valid)
1699 			continue;
1700 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1701 				adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
				adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
1703 			continue;
1704 		r = adev->ip_blocks[i].version->funcs->resume(adev);
1705 		if (r) {
1706 			DRM_ERROR("resume of IP block <%s> failed %d\n",
1707 				  adev->ip_blocks[i].version->funcs->name, r);
1708 			return r;
1709 		}
1710 	}
1711 
1712 	return 0;
1713 }
1714 
1715 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
1716 {
1717 	int r;
1718 
1719 	r = amdgpu_device_ip_resume_phase1(adev);
1720 	if (r)
1721 		return r;
1722 	r = amdgpu_device_ip_resume_phase2(adev);
1723 
1724 	return r;
1725 }
1726 
1727 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
1728 {
1729 	if (amdgpu_sriov_vf(adev)) {
1730 		if (adev->is_atom_fw) {
1731 			if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
1732 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1733 		} else {
1734 			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
1735 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1736 		}
1737 
1738 		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
1739 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
1740 	}
1741 }
1742 
1743 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
1744 {
1745 	switch (asic_type) {
1746 #if defined(CONFIG_DRM_AMD_DC)
1747 	case CHIP_BONAIRE:
1748 	case CHIP_HAWAII:
1749 	case CHIP_KAVERI:
1750 	case CHIP_KABINI:
1751 	case CHIP_MULLINS:
1752 	case CHIP_CARRIZO:
1753 	case CHIP_STONEY:
1754 	case CHIP_POLARIS11:
1755 	case CHIP_POLARIS10:
1756 	case CHIP_POLARIS12:
1757 	case CHIP_TONGA:
1758 	case CHIP_FIJI:
1759 #if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
1760 		return amdgpu_dc != 0;
1761 #endif
1762 	case CHIP_VEGA10:
1763 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1764 	case CHIP_RAVEN:
1765 #endif
1766 		return amdgpu_dc != 0;
1767 #endif
1768 	default:
1769 		return false;
1770 	}
1771 }
1772 
1773 /**
1774  * amdgpu_device_has_dc_support - check if dc is supported
1775  *
 * @adev: amdgpu_device pointer
1777  *
1778  * Returns true for supported, false for not supported
1779  */
1780 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
1781 {
1782 	if (amdgpu_sriov_vf(adev))
1783 		return false;
1784 
1785 	return amdgpu_device_asic_has_dc_support(adev->asic_type);
1786 }
1787 
1788 /**
1789  * amdgpu_device_init - initialize the driver
1790  *
1791  * @adev: amdgpu_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
1794  * @flags: driver flags
1795  *
1796  * Initializes the driver info and hw (all asics).
1797  * Returns 0 for success or an error on failure.
1798  * Called at driver startup.
1799  */
1800 int amdgpu_device_init(struct amdgpu_device *adev,
1801 		       struct drm_device *ddev,
1802 		       struct pci_dev *pdev,
1803 		       uint32_t flags)
1804 {
1805 	int r, i;
1806 	bool runtime = false;
1807 	u32 max_MBps;
1808 
1809 	adev->shutdown = false;
1810 	adev->dev = &pdev->dev;
1811 	adev->ddev = ddev;
1812 	adev->pdev = pdev;
1813 	adev->flags = flags;
1814 	adev->asic_type = flags & AMD_ASIC_MASK;
1815 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
1816 	if (amdgpu_emu_mode == 1)
1817 		adev->usec_timeout *= 2;
1818 	adev->gmc.gart_size = 512 * 1024 * 1024;
1819 	adev->accel_working = false;
1820 	adev->num_rings = 0;
1821 	adev->mman.buffer_funcs = NULL;
1822 	adev->mman.buffer_funcs_ring = NULL;
1823 	adev->vm_manager.vm_pte_funcs = NULL;
1824 	adev->vm_manager.vm_pte_num_rings = 0;
1825 	adev->gmc.gmc_funcs = NULL;
1826 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
1827 	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1828 
1829 	adev->smc_rreg = &amdgpu_invalid_rreg;
1830 	adev->smc_wreg = &amdgpu_invalid_wreg;
1831 	adev->pcie_rreg = &amdgpu_invalid_rreg;
1832 	adev->pcie_wreg = &amdgpu_invalid_wreg;
1833 	adev->pciep_rreg = &amdgpu_invalid_rreg;
1834 	adev->pciep_wreg = &amdgpu_invalid_wreg;
1835 	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
1836 	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
1837 	adev->didt_rreg = &amdgpu_invalid_rreg;
1838 	adev->didt_wreg = &amdgpu_invalid_wreg;
1839 	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
1840 	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
1841 	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
1842 	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
1843 
1844 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1845 		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
1846 		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
1847 
	/* mutex initialization is all done here so we
	 * can call these functions again later without locking issues */
1850 	atomic_set(&adev->irq.ih.lock, 0);
1851 	mutex_init(&adev->firmware.mutex);
1852 	mutex_init(&adev->pm.mutex);
1853 	mutex_init(&adev->gfx.gpu_clock_mutex);
1854 	mutex_init(&adev->srbm_mutex);
1855 	mutex_init(&adev->gfx.pipe_reserve_mutex);
1856 	mutex_init(&adev->grbm_idx_mutex);
1857 	mutex_init(&adev->mn_lock);
1858 	mutex_init(&adev->virt.vf_errors.lock);
1859 	hash_init(adev->mn_hash);
1860 	mutex_init(&adev->lock_reset);
1861 
1862 	amdgpu_device_check_arguments(adev);
1863 
1864 	spin_lock_init(&adev->mmio_idx_lock);
1865 	spin_lock_init(&adev->smc_idx_lock);
1866 	spin_lock_init(&adev->pcie_idx_lock);
1867 	spin_lock_init(&adev->uvd_ctx_idx_lock);
1868 	spin_lock_init(&adev->didt_idx_lock);
1869 	spin_lock_init(&adev->gc_cac_idx_lock);
1870 	spin_lock_init(&adev->se_cac_idx_lock);
1871 	spin_lock_init(&adev->audio_endpt_idx_lock);
1872 	spin_lock_init(&adev->mm_stats.lock);
1873 
1874 	INIT_LIST_HEAD(&adev->shadow_list);
1875 	mutex_init(&adev->shadow_list_lock);
1876 
1877 	INIT_LIST_HEAD(&adev->ring_lru_list);
1878 	spin_lock_init(&adev->ring_lru_list_lock);
1879 
1880 	INIT_DELAYED_WORK(&adev->late_init_work,
1881 			  amdgpu_device_ip_late_init_func_handler);
1882 
1883 	/* Registers mapping */
1884 	/* TODO: block userspace mapping of io register */
1885 	if (adev->asic_type >= CHIP_BONAIRE) {
1886 		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
1887 		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
1888 	} else {
1889 		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
1890 		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
1891 	}
1892 
1893 	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
	if (adev->rmmio == NULL)
		return -ENOMEM;
1897 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
1898 	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
1899 
1900 	/* doorbell bar mapping */
1901 	amdgpu_device_doorbell_init(adev);
1902 
1903 	/* io port mapping */
1904 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1905 		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
1906 			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
1907 			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
1908 			break;
1909 		}
1910 	}
1911 	if (adev->rio_mem == NULL)
		DRM_INFO("PCI I/O BAR not found.\n");
1913 
1914 	amdgpu_device_get_pcie_info(adev);
1915 
1916 	/* early init functions */
1917 	r = amdgpu_device_ip_early_init(adev);
1918 	if (r)
1919 		return r;
1920 
	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources.
	 * This will fail for cards that aren't VGA class devices; just
	 * ignore it.
	 */
1924 	vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
1925 
1926 	if (amdgpu_device_is_px(ddev))
1927 		runtime = true;
1928 	if (!pci_is_thunderbolt_attached(adev->pdev))
1929 		vga_switcheroo_register_client(adev->pdev,
1930 					       &amdgpu_switcheroo_ops, runtime);
1931 	if (runtime)
1932 		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
1933 
1934 	if (amdgpu_emu_mode == 1) {
1935 		/* post the asic on emulation mode */
1936 		emu_soc_asic_init(adev);
1937 		goto fence_driver_init;
1938 	}
1939 
1940 	/* Read BIOS */
1941 	if (!amdgpu_get_bios(adev)) {
1942 		r = -EINVAL;
1943 		goto failed;
1944 	}
1945 
1946 	r = amdgpu_atombios_init(adev);
1947 	if (r) {
1948 		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
1949 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
1950 		goto failed;
1951 	}
1952 
1953 	/* detect if we are with an SRIOV vbios */
1954 	amdgpu_device_detect_sriov_bios(adev);
1955 
1956 	/* Post card if necessary */
1957 	if (amdgpu_device_need_post(adev)) {
1958 		if (!adev->bios) {
1959 			dev_err(adev->dev, "no vBIOS found\n");
1960 			r = -EINVAL;
1961 			goto failed;
1962 		}
1963 		DRM_INFO("GPU posting now...\n");
1964 		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
1965 		if (r) {
1966 			dev_err(adev->dev, "gpu post error!\n");
1967 			goto failed;
1968 		}
1969 	}
1970 
1971 	if (adev->is_atom_fw) {
1972 		/* Initialize clocks */
1973 		r = amdgpu_atomfirmware_get_clock_info(adev);
1974 		if (r) {
1975 			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
1976 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
1977 			goto failed;
1978 		}
1979 	} else {
1980 		/* Initialize clocks */
1981 		r = amdgpu_atombios_get_clock_info(adev);
1982 		if (r) {
1983 			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
1984 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
1985 			goto failed;
1986 		}
1987 		/* init i2c buses */
1988 		if (!amdgpu_device_has_dc_support(adev))
1989 			amdgpu_atombios_i2c_init(adev);
1990 	}
1991 
1992 fence_driver_init:
1993 	/* Fence driver */
1994 	r = amdgpu_fence_driver_init(adev);
1995 	if (r) {
1996 		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
1997 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
1998 		goto failed;
1999 	}
2000 
2001 	/* init the mode config */
2002 	drm_mode_config_init(adev->ddev);
2003 
2004 	r = amdgpu_device_ip_init(adev);
2005 	if (r) {
2006 		/* failed in exclusive mode due to timeout */
2007 		if (amdgpu_sriov_vf(adev) &&
2008 		    !amdgpu_sriov_runtime(adev) &&
2009 		    amdgpu_virt_mmio_blocked(adev) &&
2010 		    !amdgpu_virt_wait_reset(adev)) {
2011 			dev_err(adev->dev, "VF exclusive mode timeout\n");
2012 			/* Don't send request since VF is inactive. */
2013 			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
2014 			adev->virt.ops = NULL;
2015 			r = -EAGAIN;
2016 			goto failed;
2017 		}
2018 		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
2019 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
2020 		amdgpu_device_ip_fini(adev);
2021 		goto failed;
2022 	}
2023 
2024 	adev->accel_working = true;
2025 
2026 	amdgpu_vm_check_compute_bug(adev);
2027 
2028 	/* Initialize the buffer migration limit. */
2029 	if (amdgpu_moverate >= 0)
2030 		max_MBps = amdgpu_moverate;
2031 	else
2032 		max_MBps = 8; /* Allow 8 MB/s. */
2033 	/* Get a log2 for easy divisions. */
2034 	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2035 
2036 	r = amdgpu_ib_pool_init(adev);
2037 	if (r) {
2038 		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2039 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2040 		goto failed;
2041 	}
2042 
2043 	r = amdgpu_ib_ring_tests(adev);
2044 	if (r)
2045 		DRM_ERROR("ib ring test failed (%d).\n", r);
2046 
2047 	if (amdgpu_sriov_vf(adev))
2048 		amdgpu_virt_init_data_exchange(adev);
2049 
2050 	amdgpu_fbdev_init(adev);
2051 
2052 	r = amdgpu_pm_sysfs_init(adev);
2053 	if (r)
		DRM_ERROR("registering pm sysfs failed (%d).\n", r);
2055 
2056 	r = amdgpu_debugfs_gem_init(adev);
2057 	if (r)
2058 		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
2059 
2060 	r = amdgpu_debugfs_regs_init(adev);
2061 	if (r)
2062 		DRM_ERROR("registering register debugfs failed (%d).\n", r);
2063 
2064 	r = amdgpu_debugfs_firmware_init(adev);
2065 	if (r)
2066 		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
2067 
2068 	r = amdgpu_debugfs_init(adev);
2069 	if (r)
2070 		DRM_ERROR("Creating debugfs files failed (%d).\n", r);
2071 
	if (amdgpu_testing & 1) {
2073 		if (adev->accel_working)
2074 			amdgpu_test_moves(adev);
2075 		else
2076 			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2077 	}
2078 	if (amdgpu_benchmarking) {
2079 		if (adev->accel_working)
2080 			amdgpu_benchmark(adev, amdgpu_benchmarking);
2081 		else
2082 			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2083 	}
2084 
2085 	/* enable clockgating, etc. after ib tests, etc. since some blocks require
2086 	 * explicit gating rather than handling it automatically.
2087 	 */
2088 	r = amdgpu_device_ip_late_init(adev);
2089 	if (r) {
2090 		dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
2091 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
2092 		goto failed;
2093 	}
2094 
2095 	return 0;
2096 
2097 failed:
2098 	amdgpu_vf_error_trans_all(adev);
2099 	if (runtime)
2100 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
2101 
2102 	return r;
2103 }
2104 
2105 /**
2106  * amdgpu_device_fini - tear down the driver
2107  *
2108  * @adev: amdgpu_device pointer
2109  *
2110  * Tear down the driver info (all asics).
2111  * Called at driver shutdown.
2112  */
2113 void amdgpu_device_fini(struct amdgpu_device *adev)
2114 {
2117 	DRM_INFO("amdgpu: finishing device.\n");
2118 	adev->shutdown = true;
2119 	if (adev->mode_info.mode_config_initialized)
2120 		drm_crtc_force_disable_all(adev->ddev);
2121 
2122 	amdgpu_ib_pool_fini(adev);
2123 	amdgpu_fence_driver_fini(adev);
2124 	amdgpu_pm_sysfs_fini(adev);
2125 	amdgpu_fbdev_fini(adev);
	amdgpu_device_ip_fini(adev);
2127 	if (adev->firmware.gpu_info_fw) {
2128 		release_firmware(adev->firmware.gpu_info_fw);
2129 		adev->firmware.gpu_info_fw = NULL;
2130 	}
2131 	adev->accel_working = false;
2132 	cancel_delayed_work_sync(&adev->late_init_work);
2133 	/* free i2c buses */
2134 	if (!amdgpu_device_has_dc_support(adev))
2135 		amdgpu_i2c_fini(adev);
2136 
2137 	if (amdgpu_emu_mode != 1)
2138 		amdgpu_atombios_fini(adev);
2139 
2140 	kfree(adev->bios);
2141 	adev->bios = NULL;
2142 	if (!pci_is_thunderbolt_attached(adev->pdev))
2143 		vga_switcheroo_unregister_client(adev->pdev);
2144 	if (adev->flags & AMD_IS_PX)
2145 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
2146 	vga_client_register(adev->pdev, NULL, NULL, NULL);
2147 	if (adev->rio_mem)
2148 		pci_iounmap(adev->pdev, adev->rio_mem);
2149 	adev->rio_mem = NULL;
2150 	iounmap(adev->rmmio);
2151 	adev->rmmio = NULL;
2152 	amdgpu_device_doorbell_fini(adev);
2153 	amdgpu_debugfs_regs_cleanup(adev);
2154 }
2155 
2156 
2157 /*
2158  * Suspend & resume.
2159  */
2160 /**
2161  * amdgpu_device_suspend - initiate device suspend
2162  *
 * @dev: drm dev pointer
 * @suspend: true to put the PCI device into the D3hot power state
 * @fbcon: notify the fbdev of suspend
2165  *
2166  * Puts the hw in the suspend state (all asics).
2167  * Returns 0 for success or an error on failure.
2168  * Called at driver suspend.
2169  */
2170 int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
2171 {
2172 	struct amdgpu_device *adev;
2173 	struct drm_crtc *crtc;
2174 	struct drm_connector *connector;
2175 	int r;
2176 
	if (dev == NULL || dev->dev_private == NULL)
		return -ENODEV;
2180 
2181 	adev = dev->dev_private;
2182 
2183 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2184 		return 0;
2185 
2186 	drm_kms_helper_poll_disable(dev);
2187 
2188 	if (!amdgpu_device_has_dc_support(adev)) {
2189 		/* turn off display hw */
2190 		drm_modeset_lock_all(dev);
2191 		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2192 			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2193 		}
2194 		drm_modeset_unlock_all(dev);
2195 	}
2196 
2197 	amdgpu_amdkfd_suspend(adev);
2198 
2199 	/* unpin the front buffers and cursors */
2200 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2201 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2202 		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2203 		struct amdgpu_bo *robj;
2204 
2205 		if (amdgpu_crtc->cursor_bo) {
2206 			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2207 			r = amdgpu_bo_reserve(aobj, true);
2208 			if (r == 0) {
2209 				amdgpu_bo_unpin(aobj);
2210 				amdgpu_bo_unreserve(aobj);
2211 			}
2212 		}
2213 
		if (rfb == NULL || rfb->obj == NULL)
			continue;
2217 		robj = gem_to_amdgpu_bo(rfb->obj);
2218 		/* don't unpin kernel fb objects */
2219 		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
2220 			r = amdgpu_bo_reserve(robj, true);
2221 			if (r == 0) {
2222 				amdgpu_bo_unpin(robj);
2223 				amdgpu_bo_unreserve(robj);
2224 			}
2225 		}
2226 	}
2227 	/* evict vram memory */
2228 	amdgpu_bo_evict_vram(adev);
2229 
2230 	amdgpu_fence_driver_suspend(adev);
2231 
2232 	r = amdgpu_device_ip_suspend(adev);
2233 
2234 	/* evict remaining vram memory
2235 	 * This second call to evict vram is to evict the gart page table
2236 	 * using the CPU.
2237 	 */
2238 	amdgpu_bo_evict_vram(adev);
2239 
2240 	pci_save_state(dev->pdev);
2241 	if (suspend) {
2242 		/* Shut down the device */
2243 		pci_disable_device(dev->pdev);
2244 		pci_set_power_state(dev->pdev, PCI_D3hot);
2245 	} else {
2246 		r = amdgpu_asic_reset(adev);
2247 		if (r)
2248 			DRM_ERROR("amdgpu asic reset failed\n");
2249 	}
2250 
2251 	if (fbcon) {
2252 		console_lock();
2253 		amdgpu_fbdev_set_suspend(adev, 1);
2254 		console_unlock();
2255 	}
2256 	return 0;
2257 }
2258 
2259 /**
2260  * amdgpu_device_resume - initiate device resume
2261  *
 * @dev: drm dev pointer
 * @resume: true to re-enable the PCI device and restore its state
 * @fbcon: notify the fbdev of resume
2263  *
2264  * Bring the hw back to operating state (all asics).
2265  * Returns 0 for success or an error on failure.
2266  * Called at driver resume.
2267  */
2268 int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
2269 {
2270 	struct drm_connector *connector;
2271 	struct amdgpu_device *adev = dev->dev_private;
2272 	struct drm_crtc *crtc;
2273 	int r = 0;
2274 
2275 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2276 		return 0;
2277 
2278 	if (fbcon)
2279 		console_lock();
2280 
2281 	if (resume) {
2282 		pci_set_power_state(dev->pdev, PCI_D0);
2283 		pci_restore_state(dev->pdev);
2284 		r = pci_enable_device(dev->pdev);
2285 		if (r)
2286 			goto unlock;
2287 	}
2288 
2289 	/* post card */
2290 	if (amdgpu_device_need_post(adev)) {
2291 		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2292 		if (r)
2293 			DRM_ERROR("amdgpu asic init failed\n");
2294 	}
2295 
2296 	r = amdgpu_device_ip_resume(adev);
2297 	if (r) {
2298 		DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
2299 		goto unlock;
2300 	}
2301 	amdgpu_fence_driver_resume(adev);
2302 
2303 	if (resume) {
2304 		r = amdgpu_ib_ring_tests(adev);
2305 		if (r)
2306 			DRM_ERROR("ib ring test failed (%d).\n", r);
2307 	}
2308 
2309 	r = amdgpu_device_ip_late_init(adev);
2310 	if (r)
2311 		goto unlock;
2312 
2313 	/* pin cursors */
2314 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2315 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2316 
2317 		if (amdgpu_crtc->cursor_bo) {
2318 			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2319 			r = amdgpu_bo_reserve(aobj, true);
2320 			if (r == 0) {
2321 				r = amdgpu_bo_pin(aobj,
2322 						  AMDGPU_GEM_DOMAIN_VRAM,
2323 						  &amdgpu_crtc->cursor_addr);
2324 				if (r != 0)
2325 					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2326 				amdgpu_bo_unreserve(aobj);
2327 			}
2328 		}
2329 	}
2330 	r = amdgpu_amdkfd_resume(adev);
2331 	if (r)
2332 		return r;
2333 
2334 	/* blat the mode back in */
2335 	if (fbcon) {
2336 		if (!amdgpu_device_has_dc_support(adev)) {
2337 			/* pre DCE11 */
2338 			drm_helper_resume_force_mode(dev);
2339 
2340 			/* turn on display hw */
2341 			drm_modeset_lock_all(dev);
2342 			list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2343 				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2344 			}
2345 			drm_modeset_unlock_all(dev);
2346 		}
2347 	}
2348 
2349 	drm_kms_helper_poll_enable(dev);
2350 
2351 	/*
2352 	 * Most of the connector probing functions try to acquire runtime pm
2353 	 * refs to ensure that the GPU is powered on when connector polling is
2354 	 * performed. Since we're calling this from a runtime PM callback,
2355 	 * trying to acquire rpm refs will cause us to deadlock.
2356 	 *
2357 	 * Since we're guaranteed to be holding the rpm lock, it's safe to
2358 	 * temporarily disable the rpm helpers so this doesn't deadlock us.
2359 	 */
2360 #ifdef CONFIG_PM
2361 	dev->dev->power.disable_depth++;
2362 #endif
2363 	if (!amdgpu_device_has_dc_support(adev))
2364 		drm_helper_hpd_irq_event(dev);
2365 	else
2366 		drm_kms_helper_hotplug_event(dev);
2367 #ifdef CONFIG_PM
2368 	dev->dev->power.disable_depth--;
2369 #endif
2370 
2371 	if (fbcon)
2372 		amdgpu_fbdev_set_suspend(adev, 0);
2373 
2374 unlock:
2375 	if (fbcon)
2376 		console_unlock();
2377 
2378 	return r;
2379 }
2380 
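/**
 * amdgpu_device_ip_check_soft_reset - check for asic hangs
 *
 * @adev: amdgpu_device pointer
 *
 * The list of all the hardware IPs that make up the asic is walked and
 * the check_soft_reset callbacks are run, flagging any hung IP blocks.
 * On SR-IOV VFs the check is skipped and a hang is assumed.
 * Returns true if any IP block is hung (or we are a VF), false otherwise.
 */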
2381 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
2382 {
2383 	int i;
2384 	bool asic_hang = false;
2385 
2386 	if (amdgpu_sriov_vf(adev))
2387 		return true;
2388 
2389 	for (i = 0; i < adev->num_ip_blocks; i++) {
2390 		if (!adev->ip_blocks[i].status.valid)
2391 			continue;
2392 		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2393 			adev->ip_blocks[i].status.hang =
2394 				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2395 		if (adev->ip_blocks[i].status.hang) {
2396 			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
2397 			asic_hang = true;
2398 		}
2399 	}
2400 	return asic_hang;
2401 }
2402 
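/**
 * amdgpu_device_ip_pre_soft_reset - prepare hung IPs for soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * The pre_soft_reset callbacks are run for all hung hardware IPs to
 * handle any state changes needed before the soft reset itself.
 * Returns 0 on success, negative error code on failure.
 */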
2403 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
2404 {
2405 	int i, r = 0;
2406 
2407 	for (i = 0; i < adev->num_ip_blocks; i++) {
2408 		if (!adev->ip_blocks[i].status.valid)
2409 			continue;
2410 		if (adev->ip_blocks[i].status.hang &&
2411 		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2412 			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
2413 			if (r)
2414 				return r;
2415 		}
2416 	}
2417 
2418 	return 0;
2419 }
2420 
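/**
 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
 *
 * @adev: amdgpu_device pointer
 *
 * Some hardware IPs (GMC, SMC, ACP, DCE, PSP) cannot be soft reset; if
 * any of them is hung, the whole asic needs a full reset.
 * Returns true if a full reset is required, false otherwise.
 */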
2421 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
2422 {
2423 	int i;
2424 
2425 	for (i = 0; i < adev->num_ip_blocks; i++) {
2426 		if (!adev->ip_blocks[i].status.valid)
2427 			continue;
2428 		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2429 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2430 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
2431 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
2432 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2433 			if (adev->ip_blocks[i].status.hang) {
				DRM_INFO("Some blocks need a full reset!\n");
2435 				return true;
2436 			}
2437 		}
2438 	}
2439 	return false;
2440 }
2441 
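/**
 * amdgpu_device_ip_soft_reset - soft reset hung IPs
 *
 * @adev: amdgpu_device pointer
 *
 * The soft_reset callbacks are run for all hung hardware IPs to perform
 * the IP-specific reset sequence.
 * Returns 0 on success, negative error code on failure.
 */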
2442 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
2443 {
2444 	int i, r = 0;
2445 
2446 	for (i = 0; i < adev->num_ip_blocks; i++) {
2447 		if (!adev->ip_blocks[i].status.valid)
2448 			continue;
2449 		if (adev->ip_blocks[i].status.hang &&
2450 		    adev->ip_blocks[i].version->funcs->soft_reset) {
2451 			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
2452 			if (r)
2453 				return r;
2454 		}
2455 	}
2456 
2457 	return 0;
2458 }
2459 
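/**
 * amdgpu_device_ip_post_soft_reset - clean up after soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * The post_soft_reset callbacks are run for all hung hardware IPs to
 * handle any state changes needed after the soft reset.
 * Returns 0 on success, negative error code on failure.
 */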
2460 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
2461 {
2462 	int i, r = 0;
2463 
2464 	for (i = 0; i < adev->num_ip_blocks; i++) {
2465 		if (!adev->ip_blocks[i].status.valid)
2466 			continue;
2467 		if (adev->ip_blocks[i].status.hang &&
2468 		    adev->ip_blocks[i].version->funcs->post_soft_reset)
2469 			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
2470 		if (r)
2471 			return r;
2472 	}
2473 
2474 	return 0;
2475 }
2476 
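/**
 * amdgpu_device_recover_vram_from_shadow - restore a VRAM BO from its shadow
 *
 * @adev: amdgpu_device pointer
 * @ring: ring to schedule the copy on
 * @bo: buffer object to restore
 * @fence: returned fence of the copy, if one was scheduled
 *
 * If @bo has a shadow and still resides in VRAM (i.e. it was not
 * evicted), schedule a copy from the shadow in GTT back into VRAM.
 * Returns 0 on success, negative error code on failure.
 */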
2477 static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
2478 						  struct amdgpu_ring *ring,
2479 						  struct amdgpu_bo *bo,
2480 						  struct dma_fence **fence)
2481 {
2482 	uint32_t domain;
2483 	int r;
2484 
2485 	if (!bo->shadow)
2486 		return 0;
2487 
2488 	r = amdgpu_bo_reserve(bo, true);
2489 	if (r)
2490 		return r;
2491 	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2492 	/* if bo has been evicted, then no need to recover */
2493 	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
2494 		r = amdgpu_bo_validate(bo->shadow);
2495 		if (r) {
2496 			DRM_ERROR("bo validate failed!\n");
2497 			goto err;
2498 		}
2499 
2500 		r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
2501 						 NULL, fence, true);
2502 		if (r) {
2503 			DRM_ERROR("recover page table failed!\n");
2504 			goto err;
2505 		}
2506 	}
2507 err:
2508 	amdgpu_bo_unreserve(bo);
2509 	return r;
2510 }
2511 
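/**
 * amdgpu_device_handle_vram_lost - restore shadowed VRAM contents
 *
 * @adev: amdgpu_device pointer
 *
 * After VRAM contents have been lost in a GPU reset, walk the shadow
 * list and copy each shadowed BO back from GTT into VRAM, waiting on
 * the copy fences as they complete.
 * Returns 0 on success, 1 on failure.
 */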
2512 static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
2513 {
2514 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2515 	struct amdgpu_bo *bo, *tmp;
2516 	struct dma_fence *fence = NULL, *next = NULL;
2517 	long r = 1;
2518 	int i = 0;
2519 	long tmo;
2520 
2521 	if (amdgpu_sriov_runtime(adev))
2522 		tmo = msecs_to_jiffies(amdgpu_lockup_timeout);
2523 	else
2524 		tmo = msecs_to_jiffies(100);
2525 
2526 	DRM_INFO("recover vram bo from shadow start\n");
2527 	mutex_lock(&adev->shadow_list_lock);
2528 	list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2529 		next = NULL;
2530 		amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
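		/* wait on the fence from the previous iteration so the copy
		 * scheduled above for this BO can run while we wait
		 */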
2531 		if (fence) {
2532 			r = dma_fence_wait_timeout(fence, false, tmo);
2533 			if (r == 0)
2534 				pr_err("wait fence %p[%d] timeout\n", fence, i);
2535 			else if (r < 0)
2536 				pr_err("wait fence %p[%d] interrupted\n", fence, i);
2537 			if (r < 1) {
2538 				dma_fence_put(fence);
2539 				fence = next;
2540 				break;
2541 			}
2542 			i++;
2543 		}
2544 
2545 		dma_fence_put(fence);
2546 		fence = next;
2547 	}
2548 	mutex_unlock(&adev->shadow_list_lock);
2549 
2550 	if (fence) {
2551 		r = dma_fence_wait_timeout(fence, false, tmo);
2552 		if (r == 0)
2553 			pr_err("wait fence %p[%d] timeout\n", fence, i);
2554 		else if (r < 0)
			pr_err("wait fence %p[%d] interrupted\n", fence, i);
	}
2558 	dma_fence_put(fence);
2559 
2560 	if (r > 0)
2561 		DRM_INFO("recover vram bo from shadow done\n");
2562 	else
2563 		DRM_ERROR("recover vram bo from shadow failed\n");
2564 
	return (r > 0) ? 0 : 1;
2566 }
2567 
2568 /*
2569  * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
2570  *
2571  * @adev: amdgpu device pointer
2572  *
 * Attempt a soft reset, falling back to a full reset if needed, and
 * reinitialize the ASIC.
 * Returns 0 for success or an error on failure.
 */
2576 static int amdgpu_device_reset(struct amdgpu_device *adev)
2577 {
	bool need_full_reset, vram_lost = false;
2579 	int r;
2580 
2581 	need_full_reset = amdgpu_device_ip_need_full_reset(adev);
2582 
2583 	if (!need_full_reset) {
2584 		amdgpu_device_ip_pre_soft_reset(adev);
2585 		r = amdgpu_device_ip_soft_reset(adev);
2586 		amdgpu_device_ip_post_soft_reset(adev);
2587 		if (r || amdgpu_device_ip_check_soft_reset(adev)) {
2588 			DRM_INFO("soft reset failed, will fallback to full reset!\n");
2589 			need_full_reset = true;
2590 		}
2591 	}
2592 
2593 	if (need_full_reset) {
2594 		r = amdgpu_device_ip_suspend(adev);
2595 
2596 retry:
2597 		r = amdgpu_asic_reset(adev);
2598 		/* post card */
2599 		amdgpu_atom_asic_init(adev->mode_info.atom_context);
2600 
2601 		if (!r) {
2602 			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
2603 			r = amdgpu_device_ip_resume_phase1(adev);
2604 			if (r)
2605 				goto out;
2606 
2607 			vram_lost = amdgpu_device_check_vram_lost(adev);
2608 			if (vram_lost) {
2609 				DRM_ERROR("VRAM is lost!\n");
2610 				atomic_inc(&adev->vram_lost_counter);
2611 			}
2612 
2613 			r = amdgpu_gtt_mgr_recover(
2614 				&adev->mman.bdev.man[TTM_PL_TT]);
2615 			if (r)
2616 				goto out;
2617 
2618 			r = amdgpu_device_ip_resume_phase2(adev);
2619 			if (r)
2620 				goto out;
2621 
2622 			if (vram_lost)
2623 				amdgpu_device_fill_reset_magic(adev);
2624 		}
2625 	}
2626 
2627 out:
2628 	if (!r) {
2629 		amdgpu_irq_gpu_reset_resume_helper(adev);
2630 		r = amdgpu_ib_ring_tests(adev);
2631 		if (r) {
2632 			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
2633 			r = amdgpu_device_ip_suspend(adev);
2634 			need_full_reset = true;
2635 			goto retry;
2636 		}
2637 	}
2638 
2639 	if (!r && ((need_full_reset && !(adev->flags & AMD_IS_APU)) || vram_lost))
2640 		r = amdgpu_device_handle_vram_lost(adev);
2641 
2642 	return r;
2643 }
2644 
2645 /*
2646  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
2647  *
 * @adev: amdgpu device pointer
 * @from_hypervisor: true if the reset is requested from the hypervisor
 *
 * Do a VF FLR and reinitialize the ASIC.
 * Returns 0 for success or an error on failure.
 */
2653 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, bool from_hypervisor)
2654 {
2655 	int r;
2656 
2657 	if (from_hypervisor)
2658 		r = amdgpu_virt_request_full_gpu(adev, true);
2659 	else
2660 		r = amdgpu_virt_reset_gpu(adev);
2661 	if (r)
2662 		return r;
2663 
2664 	/* Resume IP prior to SMC */
2665 	r = amdgpu_device_ip_reinit_early_sriov(adev);
2666 	if (r)
2667 		goto error;
2668 
	/* we need to recover the GART prior to running SMC/CP/SDMA resume */
2670 	amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
2671 
2672 	/* now we are okay to resume SMC/CP/SDMA */
2673 	r = amdgpu_device_ip_reinit_late_sriov(adev);
2674 	amdgpu_virt_release_full_gpu(adev, true);
2675 	if (r)
2676 		goto error;
2677 
2678 	amdgpu_irq_gpu_reset_resume_helper(adev);
2679 	r = amdgpu_ib_ring_tests(adev);
2680 
2681 	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
2682 		atomic_inc(&adev->vram_lost_counter);
2683 		r = amdgpu_device_handle_vram_lost(adev);
2684 	}
2685 
error:
2688 	return r;
2689 }
2690 
2691 /**
2692  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
2693  *
2694  * @adev: amdgpu device pointer
 * @job: which job triggered the hang, or NULL
 * @force: forces reset regardless of amdgpu_gpu_recovery
2697  *
2698  * Attempt to reset the GPU if it has hung (all asics).
2699  * Returns 0 for success or an error on failure.
2700  */
2701 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
2702 			      struct amdgpu_job *job, bool force)
2703 {
2704 	struct drm_atomic_state *state = NULL;
2705 	int i, r, resched;
2706 
2707 	if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
2708 		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2709 		return 0;
2710 	}
2711 
2712 	if (!force && (amdgpu_gpu_recovery == 0 ||
2713 			(amdgpu_gpu_recovery == -1  && !amdgpu_sriov_vf(adev)))) {
2714 		DRM_INFO("GPU recovery disabled.\n");
2715 		return 0;
2716 	}
2717 
2718 	dev_info(adev->dev, "GPU reset begin!\n");
2719 
2720 	mutex_lock(&adev->lock_reset);
2721 	atomic_inc(&adev->gpu_reset_counter);
2722 	adev->in_gpu_reset = 1;
2723 
2724 	/* block TTM */
2725 	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2726 
2727 	/* store modesetting */
2728 	if (amdgpu_device_has_dc_support(adev))
2729 		state = drm_atomic_helper_suspend(adev->ddev);
2730 
2731 	/* block all schedulers and reset given job's ring */
2732 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2733 		struct amdgpu_ring *ring = adev->rings[i];
2734 
2735 		if (!ring || !ring->sched.thread)
2736 			continue;
2737 
2738 		kthread_park(ring->sched.thread);
2739 
2740 		if (job && job->ring->idx != i)
2741 			continue;
2742 
		drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
2744 
2745 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2746 		amdgpu_fence_driver_force_completion(ring);
2747 	}
2748 
2749 	if (amdgpu_sriov_vf(adev))
2750 		r = amdgpu_device_reset_sriov(adev, job ? false : true);
2751 	else
2752 		r = amdgpu_device_reset(adev);
2753 
2754 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2755 		struct amdgpu_ring *ring = adev->rings[i];
2756 
2757 		if (!ring || !ring->sched.thread)
2758 			continue;
2759 
		/* only need to recover the scheduler of the given job's ring,
		 * or of all rings (in the case @job is NULL),
		 * after the reset above has completed
		 */
2764 		if ((!job || job->ring->idx == i) && !r)
2765 			drm_sched_job_recovery(&ring->sched);
2766 
2767 		kthread_unpark(ring->sched.thread);
2768 	}
2769 
	if (amdgpu_device_has_dc_support(adev)) {
		/* report the atomic resume error, not the reset status in r */
		int ret = drm_atomic_helper_resume(adev->ddev, state);

		if (ret)
			dev_info(adev->dev, "drm resume failed: %d\n", ret);
2773 	} else {
2774 		drm_helper_resume_force_mode(adev->ddev);
2775 	}
2776 
2777 	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2778 
2779 	if (r) {
		/* bad news, how do we tell it to userspace? */
		dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
	} else {
		dev_info(adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
2785 	}
2786 
2787 	amdgpu_vf_error_trans_all(adev);
2788 	adev->in_gpu_reset = 0;
2789 	mutex_unlock(&adev->lock_reset);
2790 	return r;
2791 }
2792 
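/**
 * amdgpu_device_get_pcie_info - determine the PCIe capabilities
 *
 * @adev: amdgpu_device pointer
 *
 * Fill in adev->pm.pcie_gen_mask and adev->pm.pcie_mlw_mask with the
 * supported PCIe link speeds and widths, taken from the
 * amdgpu_pcie_gen_cap and amdgpu_pcie_lane_cap module options when set,
 * or queried from the bus otherwise.
 */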
2793 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
2794 {
2795 	u32 mask;
2796 	int ret;
2797 
2798 	if (amdgpu_pcie_gen_cap)
2799 		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
2800 
2801 	if (amdgpu_pcie_lane_cap)
2802 		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
2803 
2804 	/* covers APUs as well */
2805 	if (pci_is_root_bus(adev->pdev->bus)) {
2806 		if (adev->pm.pcie_gen_mask == 0)
2807 			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2808 		if (adev->pm.pcie_mlw_mask == 0)
2809 			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
2810 		return;
2811 	}
2812 
2813 	if (adev->pm.pcie_gen_mask == 0) {
2814 		ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
2815 		if (!ret) {
2816 			adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
2817 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
2818 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
2819 
2820 			if (mask & DRM_PCIE_SPEED_25)
2821 				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
2822 			if (mask & DRM_PCIE_SPEED_50)
2823 				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
2824 			if (mask & DRM_PCIE_SPEED_80)
2825 				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
2826 		} else {
2827 			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2828 		}
2829 	}
2830 	if (adev->pm.pcie_mlw_mask == 0) {
2831 		ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
2832 		if (!ret) {
2833 			switch (mask) {
2834 			case 32:
2835 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
2836 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2837 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2838 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2839 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2840 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2841 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2842 				break;
2843 			case 16:
2844 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2845 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2846 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2847 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2848 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2849 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2850 				break;
2851 			case 12:
2852 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2853 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2854 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2855 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2856 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2857 				break;
2858 			case 8:
2859 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2860 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2861 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2862 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2863 				break;
2864 			case 4:
2865 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2866 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2867 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2868 				break;
2869 			case 2:
2870 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2871 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2872 				break;
2873 			case 1:
2874 				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
2875 				break;
2876 			default:
2877 				break;
2878 			}
2879 		} else {
2880 			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
2881 		}
2882 	}
2883 }
2884 
2885