1d38ceaf9SAlex Deucher /*
2d38ceaf9SAlex Deucher  * Copyright 2007-8 Advanced Micro Devices, Inc.
3d38ceaf9SAlex Deucher  * Copyright 2008 Red Hat Inc.
4d38ceaf9SAlex Deucher  *
5d38ceaf9SAlex Deucher  * Permission is hereby granted, free of charge, to any person obtaining a
6d38ceaf9SAlex Deucher  * copy of this software and associated documentation files (the "Software"),
7d38ceaf9SAlex Deucher  * to deal in the Software without restriction, including without limitation
8d38ceaf9SAlex Deucher  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9d38ceaf9SAlex Deucher  * and/or sell copies of the Software, and to permit persons to whom the
10d38ceaf9SAlex Deucher  * Software is furnished to do so, subject to the following conditions:
11d38ceaf9SAlex Deucher  *
12d38ceaf9SAlex Deucher  * The above copyright notice and this permission notice shall be included in
13d38ceaf9SAlex Deucher  * all copies or substantial portions of the Software.
14d38ceaf9SAlex Deucher  *
15d38ceaf9SAlex Deucher  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16d38ceaf9SAlex Deucher  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17d38ceaf9SAlex Deucher  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18d38ceaf9SAlex Deucher  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19d38ceaf9SAlex Deucher  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20d38ceaf9SAlex Deucher  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21d38ceaf9SAlex Deucher  * OTHER DEALINGS IN THE SOFTWARE.
22d38ceaf9SAlex Deucher  *
23d38ceaf9SAlex Deucher  * Authors: Dave Airlie
24d38ceaf9SAlex Deucher  *          Alex Deucher
25d38ceaf9SAlex Deucher  */
26fdf2f6c5SSam Ravnborg 
27d38ceaf9SAlex Deucher #include <drm/amdgpu_drm.h>
28d38ceaf9SAlex Deucher #include "amdgpu.h"
29d38ceaf9SAlex Deucher #include "amdgpu_i2c.h"
30d38ceaf9SAlex Deucher #include "atom.h"
31d38ceaf9SAlex Deucher #include "amdgpu_connectors.h"
325d43be0cSChristian König #include "amdgpu_display.h"
33543036a2SAurabindo Pillai #include "soc15_common.h"
34543036a2SAurabindo Pillai #include "gc/gc_11_0_0_offset.h"
35543036a2SAurabindo Pillai #include "gc/gc_11_0_0_sh_mask.h"
36*fe151ed7SAlex Deucher #include "bif/bif_4_1_d.h"
37d38ceaf9SAlex Deucher #include <asm/div64.h>
38d38ceaf9SAlex Deucher 
39fdf2f6c5SSam Ravnborg #include <linux/pci.h>
40d38ceaf9SAlex Deucher #include <linux/pm_runtime.h>
41d38ceaf9SAlex Deucher #include <drm/drm_crtc_helper.h>
421c6b6bd0SHamza Mahfooz #include <drm/drm_damage_helper.h>
431c6b6bd0SHamza Mahfooz #include <drm/drm_drv.h>
44d38ceaf9SAlex Deucher #include <drm/drm_edid.h>
45ab77e02cSNoralf Trønnes #include <drm/drm_fb_helper.h>
4645b64fd9SThomas Zimmermann #include <drm/drm_gem_framebuffer_helper.h>
4708d76915SBas Nieuwenhuizen #include <drm/drm_fourcc.h>
48973ad627SThomas Zimmermann #include <drm/drm_modeset_helper.h>
49fdf2f6c5SSam Ravnborg #include <drm/drm_vblank.h>
50d38ceaf9SAlex Deucher 
51a347ca97SAlex Deucher /**
52a347ca97SAlex Deucher  * amdgpu_display_hotplug_work_func - work handler for display hotplug event
53a347ca97SAlex Deucher  *
54a347ca97SAlex Deucher  * @work: work struct pointer
55a347ca97SAlex Deucher  *
56a347ca97SAlex Deucher  * This is the hotplug event work handler (all ASICs).
57a347ca97SAlex Deucher  * The work gets scheduled from the IRQ handler if there
58a347ca97SAlex Deucher  * was a hotplug interrupt.  It walks through the connector table
59a347ca97SAlex Deucher  * and calls hotplug handler for each connector. After this, it sends
60a347ca97SAlex Deucher  * a DRM hotplug event to alert userspace.
61a347ca97SAlex Deucher  *
62a347ca97SAlex Deucher  * This design approach is required in order to defer hotplug event handling
63a347ca97SAlex Deucher  * from the IRQ handler to a work handler because hotplug handler has to use
64a347ca97SAlex Deucher  * mutexes which cannot be locked in an IRQ handler (since &mutex_lock may
65a347ca97SAlex Deucher  * sleep).
66a347ca97SAlex Deucher  */
amdgpu_display_hotplug_work_func(struct work_struct * work)67a347ca97SAlex Deucher void amdgpu_display_hotplug_work_func(struct work_struct *work)
68a347ca97SAlex Deucher {
69a347ca97SAlex Deucher 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
7090f56611Sxurui 						  hotplug_work.work);
71a347ca97SAlex Deucher 	struct drm_device *dev = adev_to_drm(adev);
72a347ca97SAlex Deucher 	struct drm_mode_config *mode_config = &dev->mode_config;
73a347ca97SAlex Deucher 	struct drm_connector *connector;
74a347ca97SAlex Deucher 	struct drm_connector_list_iter iter;
75a347ca97SAlex Deucher 
76a347ca97SAlex Deucher 	mutex_lock(&mode_config->mutex);
77a347ca97SAlex Deucher 	drm_connector_list_iter_begin(dev, &iter);
78a347ca97SAlex Deucher 	drm_for_each_connector_iter(connector, &iter)
79a347ca97SAlex Deucher 		amdgpu_connector_hotplug(connector);
80a347ca97SAlex Deucher 	drm_connector_list_iter_end(&iter);
81a347ca97SAlex Deucher 	mutex_unlock(&mode_config->mutex);
82a347ca97SAlex Deucher 	/* Just fire off a uevent and let userspace tell us what to do */
83a347ca97SAlex Deucher 	drm_helper_hpd_irq_event(dev);
84a347ca97SAlex Deucher }
85a347ca97SAlex Deucher 
8631d5c523SAlex Deucher static int amdgpu_display_framebuffer_init(struct drm_device *dev,
8731d5c523SAlex Deucher 					   struct amdgpu_framebuffer *rfb,
8831d5c523SAlex Deucher 					   const struct drm_mode_fb_cmd2 *mode_cmd,
8931d5c523SAlex Deucher 					   struct drm_gem_object *obj);
9031d5c523SAlex Deucher 
amdgpu_display_flip_callback(struct dma_fence * f,struct dma_fence_cb * cb)913a05dc00SSamuel Li static void amdgpu_display_flip_callback(struct dma_fence *f,
923a05dc00SSamuel Li 					 struct dma_fence_cb *cb)
93c3874b75SChristian König {
94c3874b75SChristian König 	struct amdgpu_flip_work *work =
95c3874b75SChristian König 		container_of(cb, struct amdgpu_flip_work, cb);
96c3874b75SChristian König 
97f54d1867SChris Wilson 	dma_fence_put(f);
98325cbba1SMichel Dänzer 	schedule_work(&work->flip_work.work);
99c3874b75SChristian König }
100c3874b75SChristian König 
amdgpu_display_flip_handle_fence(struct amdgpu_flip_work * work,struct dma_fence ** f)1013a05dc00SSamuel Li static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
102f54d1867SChris Wilson 					     struct dma_fence **f)
1031ffd2652SChristian König {
104f54d1867SChris Wilson 	struct dma_fence *fence = *f;
1051ffd2652SChristian König 
106c3874b75SChristian König 	if (fence == NULL)
107c3874b75SChristian König 		return false;
1081ffd2652SChristian König 
1091ffd2652SChristian König 	*f = NULL;
110c3874b75SChristian König 
1113a05dc00SSamuel Li 	if (!dma_fence_add_callback(fence, &work->cb,
1123a05dc00SSamuel Li 				    amdgpu_display_flip_callback))
113c3874b75SChristian König 		return true;
114c3874b75SChristian König 
115f54d1867SChris Wilson 	dma_fence_put(fence);
116c3874b75SChristian König 	return false;
1171ffd2652SChristian König }
118d38ceaf9SAlex Deucher 
/*
 * amdgpu_display_flip_work_func - deferred page-flip worker
 *
 * @__work: embedded &delayed_work inside struct amdgpu_flip_work
 *
 * Waits (asynchronously, by re-arming itself as a fence callback) for all
 * shared fences on the new buffer, then waits until the CRTC scanout is
 * past the vblank preceding the target vblank, and finally programs the
 * flip registers via MMIO.
 */
static void amdgpu_display_flip_work_func(struct work_struct *__work)
{
	struct delayed_work *delayed_work =
		container_of(__work, struct delayed_work, work);
	struct amdgpu_flip_work *work =
		container_of(delayed_work, struct amdgpu_flip_work, flip_work);
	struct amdgpu_device *adev = work->adev;
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &amdgpu_crtc->base;
	unsigned long flags;
	unsigned int i;
	int vpos, hpos;

	/* If any dependency fence is still unsignaled, a callback is armed
	 * and this worker will be rescheduled when it fires; bail out here.
	 */
	for (i = 0; i < work->shared_count; ++i)
		if (amdgpu_display_flip_handle_fence(work, &work->shared[i]))
			return;

	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip.  If we're still too early, poll again in
	 * 1 ms via the delayed work instead of busy-waiting.
	 */
	if (amdgpu_crtc->enabled &&
	    (amdgpu_display_get_crtc_scanoutpos(adev_to_drm(adev), work->crtc_id, 0,
						&vpos, &hpos, NULL, NULL,
						&crtc->hwmode)
	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
	    (int)(work->target_vblank -
		  amdgpu_get_vblank_counter_kms(crtc)) > 0) {
		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
		return;
	}

	/* We borrow the event spin lock for protecting flip_status */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* Do the flip (mmio) */
	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);

	/* Set the flip status */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);


	drm_dbg_vbl(adev_to_drm(adev),
		    "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
		    amdgpu_crtc->crtc_id, amdgpu_crtc, work);

}
168d38ceaf9SAlex Deucher 
169d38ceaf9SAlex Deucher /*
170d38ceaf9SAlex Deucher  * Handle unpin events outside the interrupt handler proper.
171d38ceaf9SAlex Deucher  */
amdgpu_display_unpin_work_func(struct work_struct * __work)1723a05dc00SSamuel Li static void amdgpu_display_unpin_work_func(struct work_struct *__work)
173d38ceaf9SAlex Deucher {
174d38ceaf9SAlex Deucher 	struct amdgpu_flip_work *work =
175d38ceaf9SAlex Deucher 		container_of(__work, struct amdgpu_flip_work, unpin_work);
176d38ceaf9SAlex Deucher 	int r;
177d38ceaf9SAlex Deucher 
178d38ceaf9SAlex Deucher 	/* unpin of the old buffer */
179c81a1a74SMichel Dänzer 	r = amdgpu_bo_reserve(work->old_abo, true);
180d38ceaf9SAlex Deucher 	if (likely(r == 0)) {
1814671078eSChristian König 		amdgpu_bo_unpin(work->old_abo);
182765e7fbfSChristian König 		amdgpu_bo_unreserve(work->old_abo);
183d38ceaf9SAlex Deucher 	} else
184d38ceaf9SAlex Deucher 		DRM_ERROR("failed to reserve buffer after flip\n");
185d38ceaf9SAlex Deucher 
186765e7fbfSChristian König 	amdgpu_bo_unref(&work->old_abo);
1871ffd2652SChristian König 	kfree(work->shared);
188d38ceaf9SAlex Deucher 	kfree(work);
189d38ceaf9SAlex Deucher }
190d38ceaf9SAlex Deucher 
/*
 * amdgpu_display_crtc_page_flip_target - queue a page flip to a target vblank
 *
 * @crtc: CRTC to flip on
 * @fb: new framebuffer to display
 * @event: optional vblank event to deliver on completion
 * @page_flip_flags: DRM_MODE_PAGE_FLIP_* flags (only ASYNC is consumed here)
 * @target: absolute vblank count the flip should complete on
 * @ctx: modeset acquire context (unused here, required by the hook signature)
 *
 * Pins the new buffer, collects its write fences, and hands the flip to
 * amdgpu_display_flip_work_func(), which performs it once the fences have
 * signaled and the target vblank is near.  The old buffer is unpinned later
 * from the flip-completion path via work->unpin_work.
 *
 * Returns 0 on success or a negative error code (-ENOMEM, -EBUSY, ...).
 */
int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags, uintint32_t target,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_flip_work *work;
	struct amdgpu_bo *new_abo;
	unsigned long flags;
	u64 tiling_flags;
	int i, r;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_DELAYED_WORK(&work->flip_work, amdgpu_display_flip_work_func);
	INIT_WORK(&work->unpin_work, amdgpu_display_unpin_work_func);

	work->event = event;
	work->adev = adev;
	work->crtc_id = amdgpu_crtc->crtc_id;
	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;

	/* schedule unpin of the old buffer */
	obj = crtc->primary->fb->obj[0];

	/* take a reference to the old object */
	work->old_abo = gem_to_amdgpu_bo(obj);
	amdgpu_bo_ref(work->old_abo);

	obj = fb->obj[0];
	new_abo = gem_to_amdgpu_bo(obj);

	/* pin the new buffer */
	r = amdgpu_bo_reserve(new_abo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new abo buffer before flip\n");
		goto cleanup;
	}

	if (!adev->enable_virtual_display) {
		/* Scanout requires physically contiguous VRAM. */
		new_abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		r = amdgpu_bo_pin(new_abo,
				  amdgpu_display_supported_domains(adev, new_abo->flags));
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to pin new abo buffer before flip\n");
			goto unreserve;
		}
	}

	r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
	if (unlikely(r != 0)) {
		DRM_ERROR("%p bind failed\n", new_abo);
		goto unpin;
	}

	/* Snapshot the writers the flip must wait on before scanning out. */
	r = dma_resv_get_fences(new_abo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
				&work->shared_count,
				&work->shared);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to get fences for buffer\n");
		goto unpin;
	}

	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
	amdgpu_bo_unreserve(new_abo);

	if (!adev->enable_virtual_display)
		work->base = amdgpu_bo_gpu_offset(new_abo);
	/* Rebase the userspace target onto the hardware vblank counter. */
	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
		amdgpu_get_vblank_counter_kms(crtc);

	/* we borrow the event spin lock for protecting flip_work */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto pflip_cleanup;
	}

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
	amdgpu_crtc->pflip_works = work;


	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
					 amdgpu_crtc->crtc_id, amdgpu_crtc, work);
	/* update crtc fb */
	crtc->primary->fb = fb;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	/* Kick the worker once immediately; it re-arms itself as needed. */
	amdgpu_display_flip_work_func(&work->flip_work.work);
	return 0;

pflip_cleanup:
	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
		DRM_ERROR("failed to reserve new abo in error path\n");
		goto cleanup;
	}
unpin:
	if (!adev->enable_virtual_display)
		amdgpu_bo_unpin(new_abo);

unreserve:
	amdgpu_bo_unreserve(new_abo);

cleanup:
	amdgpu_bo_unref(&work->old_abo);
	for (i = 0; i < work->shared_count; ++i)
		dma_fence_put(work->shared[i]);
	kfree(work->shared);
	kfree(work);

	return r;
}
310d38ceaf9SAlex Deucher 
/*
 * amdgpu_display_crtc_set_config - CRTC set_config hook with runtime-PM refs
 *
 * @set: mode set configuration from userspace
 * @ctx: modeset acquire context
 *
 * Wraps drm_crtc_helper_set_config() with runtime-PM bookkeeping: while at
 * least one CRTC is enabled the device holds one extra power reference
 * (tracked by adev->have_disp_power_ref) so it cannot runtime-suspend with
 * an active display.  When the last CRTC is disabled that reference is
 * dropped again.
 *
 * Returns the helper's result, or a negative error code.
 */
int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
				   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev;
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	ret = drm_crtc_helper_set_config(set, ctx);

	/* Re-scan all CRTCs: is any scanout still enabled after the change? */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	adev = drm_to_adev(dev);
	/* if we have active crtcs and we don't have a power ref,
	 * take the current one
	 */
	if (active && !adev->have_disp_power_ref) {
		adev->have_disp_power_ref = true;
		/* Keep the reference from pm_runtime_get_sync() above. */
		return ret;
	}
	/* if we have no active crtcs, then go to
	 * drop the power ref we got before
	 */
	if (!active && adev->have_disp_power_ref)
		adev->have_disp_power_ref = false;
out:
	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}
355d38ceaf9SAlex Deucher 
/* Human-readable encoder names, indexed by amdgpu_encoder->encoder_id
 * (the ATOM ENCODER_OBJECT_ID_* values).  Sized by the initializer so the
 * count can never drift out of sync with the entries.
 */
static const char *encoder_names[] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
	"NUTMEG",
	"TRAVIS",
	"INTERNAL_VCE",
	"INTERNAL_UNIPHY3",
	"HDMI_ANX9805",
	"INTERNAL_AMCLK",
	"VIRTUAL",
};
399d38ceaf9SAlex Deucher 
/* Hotplug-detect pin names, indexed by amdgpu_connector->hpd.hpd
 * (AMDGPU_HPD_1..AMDGPU_HPD_6).  Sized by the initializer so the count
 * cannot drift from the entries.
 */
static const char *hpd_names[] = {
	"HPD1",
	"HPD2",
	"HPD3",
	"HPD4",
	"HPD5",
	"HPD6",
};
408d38ceaf9SAlex Deucher 
/*
 * amdgpu_display_print_display_setup - dump the display topology to the log
 *
 * @dev: DRM device
 *
 * Walks every connector and prints its name, HPD pin, DDC register set (or
 * a warning if a DDC bus is missing where one is expected), router state,
 * and every encoder attached to it, keyed by the ATOM device bitmask.
 * Informational only; no state is modified.
 */
void amdgpu_display_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector_list_iter iter;
	uint32_t devices;
	int i = 0;

	drm_connector_list_iter_begin(dev, &iter);
	DRM_INFO("AMDGPU Display Connectors\n");
	drm_for_each_connector_iter(connector, &iter) {
		amdgpu_connector = to_amdgpu_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO("  %s\n", connector->name);
		if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE)
			DRM_INFO("  %s\n", hpd_names[amdgpu_connector->hpd.hpd]);
		if (amdgpu_connector->ddc_bus) {
			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 amdgpu_connector->ddc_bus->rec.mask_clk_reg,
				 amdgpu_connector->ddc_bus->rec.mask_data_reg,
				 amdgpu_connector->ddc_bus->rec.a_clk_reg,
				 amdgpu_connector->ddc_bus->rec.a_data_reg,
				 amdgpu_connector->ddc_bus->rec.en_clk_reg,
				 amdgpu_connector->ddc_bus->rec.en_data_reg,
				 amdgpu_connector->ddc_bus->rec.y_clk_reg,
				 amdgpu_connector->ddc_bus->rec.y_data_reg);
			if (amdgpu_connector->router.ddc_valid)
				DRM_INFO("  DDC Router 0x%x/0x%x\n",
					 amdgpu_connector->router.ddc_mux_control_pin,
					 amdgpu_connector->router.ddc_mux_state);
			if (amdgpu_connector->router.cd_valid)
				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
					 amdgpu_connector->router.cd_mux_control_pin,
					 amdgpu_connector->router.cd_mux_state);
		} else {
			/* These connector types always carry a DDC bus, so a
			 * missing one points at a broken video BIOS table.
			 */
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to [email protected]\n");
		}
		DRM_INFO("  Encoders:\n");
		/* An encoder serves this connector if their device masks overlap. */
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			amdgpu_encoder = to_amdgpu_encoder(encoder);
			devices = amdgpu_encoder->devices & amdgpu_connector->devices;
			if (devices) {
				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
					DRM_INFO("    CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
					DRM_INFO("    CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
					DRM_INFO("    LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
					DRM_INFO("    DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
					DRM_INFO("    DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
					DRM_INFO("    DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
					DRM_INFO("    DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
					DRM_INFO("    DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
					DRM_INFO("    DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_TV1_SUPPORT)
					DRM_INFO("    TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CV_SUPPORT)
					DRM_INFO("    CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			}
		}
		i++;
	}
	drm_connector_list_iter_end(&iter);
}
487d38ceaf9SAlex Deucher 
/* Probe for a monitor by reading the first 8 EDID bytes over DDC.
 *
 * @amdgpu_connector: connector whose DDC bus to probe
 * @use_aux: true to probe over the DP AUX channel instead of the i2c bus
 *
 * Returns true when the transfer succeeds and the data looks like an EDID
 * header (at least 6 of the 8 signature bytes match).
 */
bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
			      bool use_aux)
{
	u8 offset = 0x0;
	u8 edid_hdr[8];
	int num;
	struct i2c_msg msgs[] = {
		{	/* write: position at EDID offset 0 */
			.addr = DDC_ADDR,
			.flags = 0,
			.len = 1,
			.buf = &offset,
		},
		{	/* read: fetch the first 8 EDID bytes */
			.addr = DDC_ADDR,
			.flags = I2C_M_RD,
			.len = 8,
			.buf = edid_hdr,
		}
	};

	/* on hw with routers, select right port */
	if (amdgpu_connector->router.ddc_valid)
		amdgpu_i2c_router_select_ddc_port(amdgpu_connector);

	num = use_aux ?
		i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2) :
		i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);

	/* Couldn't find an accessible DDC on this connector */
	if (num != 2)
		return false;

	/* The EDID signature is 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00;
	 * only the first 6 bytes must be valid because
	 * drm_edid_block_valid() can fix the last 2 bytes.
	 */
	return drm_edid_header_is_valid(edid_hdr) >= 6;
}
535d38ceaf9SAlex Deucher 
amdgpu_dirtyfb(struct drm_framebuffer * fb,struct drm_file * file,unsigned int flags,unsigned int color,struct drm_clip_rect * clips,unsigned int num_clips)5361c6b6bd0SHamza Mahfooz static int amdgpu_dirtyfb(struct drm_framebuffer *fb, struct drm_file *file,
5371c6b6bd0SHamza Mahfooz 			  unsigned int flags, unsigned int color,
5381c6b6bd0SHamza Mahfooz 			  struct drm_clip_rect *clips, unsigned int num_clips)
5391c6b6bd0SHamza Mahfooz {
5401c6b6bd0SHamza Mahfooz 
5411c6b6bd0SHamza Mahfooz 	if (file)
5421c6b6bd0SHamza Mahfooz 		return -ENOSYS;
5431c6b6bd0SHamza Mahfooz 
5441c6b6bd0SHamza Mahfooz 	return drm_atomic_helper_dirtyfb(fb, file, flags, color, clips,
5451c6b6bd0SHamza Mahfooz 					 num_clips);
5461c6b6bd0SHamza Mahfooz }
5471c6b6bd0SHamza Mahfooz 
/* Framebuffer ops for the non-atomic path: plain GEM framebuffer
 * destroy/handle helpers, no dirty callback.
 */
static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};
552e9127f5eSAlex Deucher 
/* Framebuffer ops for the atomic path: same GEM helpers plus the
 * amdgpu_dirtyfb damage-flush hook.
 */
static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
	.dirty = amdgpu_dirtyfb
};
5581c6b6bd0SHamza Mahfooz 
amdgpu_display_supported_domains(struct amdgpu_device * adev,uint64_t bo_flags)559f2bd8a0eSAndrey Grodzovsky uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
560f2bd8a0eSAndrey Grodzovsky 					  uint64_t bo_flags)
5615d43be0cSChristian König {
5625d43be0cSChristian König 	uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
5635d43be0cSChristian König 
5642c9c178bSAlex Deucher #if defined(CONFIG_DRM_AMD_DC)
565ddcb7fc6SAndrey Grodzovsky 	/*
566f2bd8a0eSAndrey Grodzovsky 	 * if amdgpu_bo_support_uswc returns false it means that USWC mappings
567ddcb7fc6SAndrey Grodzovsky 	 * is not supported for this board. But this mapping is required
568ddcb7fc6SAndrey Grodzovsky 	 * to avoid hang caused by placement of scanout BO in GTT on certain
569ddcb7fc6SAndrey Grodzovsky 	 * APUs. So force the BO placement to VRAM in case this architecture
570ddcb7fc6SAndrey Grodzovsky 	 * will not allow USWC mappings.
571f4d4f53fSBhaskar Chowdhury 	 * Also, don't allow GTT domain if the BO doesn't have USWC flag set.
572ddcb7fc6SAndrey Grodzovsky 	 */
573403c1ef0SAlex Deucher 	if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
574f2bd8a0eSAndrey Grodzovsky 	    amdgpu_bo_support_uswc(bo_flags) &&
575d09ef243SAlex Deucher 	    adev->dc_enabled &&
576a7f520bfSAlex Deucher 	    adev->mode_info.gpu_vm_support)
5775d43be0cSChristian König 		domain |= AMDGPU_GEM_DOMAIN_GTT;
5782c9c178bSAlex Deucher #endif
5795d43be0cSChristian König 
5805d43be0cSChristian König 	return domain;
5815d43be0cSChristian König }
5825d43be0cSChristian König 
/*
 * Layout descriptions for formats combined with an AMD DCC modifier:
 * plane 0 holds the image data, plane 1 the DCC metadata (its cpp of 0
 * marks it as an opaque, non-pixel plane).  Selected by
 * amdgpu_lookup_format_info() when the modifier has DCC set.
 */
static const struct drm_format_info dcc_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	   .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 2,
	  .cpp = { 2, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};
610816853f9SBas Nieuwenhuizen 
/*
 * Same as dcc_formats but with a third opaque plane, used when the modifier
 * has DCC_RETILE set: plane 1 is the displayable DCC and plane 2 the
 * renderable DCC plane.  Selected by amdgpu_lookup_format_info().
 */
static const struct drm_format_info dcc_retile_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	   .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 3,
	  .cpp = { 2, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};
638816853f9SBas Nieuwenhuizen 
639816853f9SBas Nieuwenhuizen static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],int num_formats,u32 format)640816853f9SBas Nieuwenhuizen lookup_format_info(const struct drm_format_info formats[],
641816853f9SBas Nieuwenhuizen 		  int num_formats, u32 format)
642816853f9SBas Nieuwenhuizen {
643816853f9SBas Nieuwenhuizen 	int i;
644816853f9SBas Nieuwenhuizen 
645816853f9SBas Nieuwenhuizen 	for (i = 0; i < num_formats; i++) {
646816853f9SBas Nieuwenhuizen 		if (formats[i].format == format)
647816853f9SBas Nieuwenhuizen 			return &formats[i];
648816853f9SBas Nieuwenhuizen 	}
649816853f9SBas Nieuwenhuizen 
650816853f9SBas Nieuwenhuizen 	return NULL;
651816853f9SBas Nieuwenhuizen }
652816853f9SBas Nieuwenhuizen 
653816853f9SBas Nieuwenhuizen const struct drm_format_info *
amdgpu_lookup_format_info(u32 format,uint64_t modifier)654816853f9SBas Nieuwenhuizen amdgpu_lookup_format_info(u32 format, uint64_t modifier)
655816853f9SBas Nieuwenhuizen {
656816853f9SBas Nieuwenhuizen 	if (!IS_AMD_FMT_MOD(modifier))
657816853f9SBas Nieuwenhuizen 		return NULL;
658816853f9SBas Nieuwenhuizen 
659fd536d2eSMarek Olšák 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) < AMD_FMT_MOD_TILE_VER_GFX9 ||
660fd536d2eSMarek Olšák 	    AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX12)
661fd536d2eSMarek Olšák 		return NULL;
662fd536d2eSMarek Olšák 
663816853f9SBas Nieuwenhuizen 	if (AMD_FMT_MOD_GET(DCC_RETILE, modifier))
664816853f9SBas Nieuwenhuizen 		return lookup_format_info(dcc_retile_formats,
665816853f9SBas Nieuwenhuizen 					  ARRAY_SIZE(dcc_retile_formats),
666816853f9SBas Nieuwenhuizen 					  format);
667816853f9SBas Nieuwenhuizen 
668816853f9SBas Nieuwenhuizen 	if (AMD_FMT_MOD_GET(DCC, modifier))
669816853f9SBas Nieuwenhuizen 		return lookup_format_info(dcc_formats, ARRAY_SIZE(dcc_formats),
670816853f9SBas Nieuwenhuizen 					  format);
671816853f9SBas Nieuwenhuizen 
672816853f9SBas Nieuwenhuizen 	/* returning NULL will cause the default format structs to be used. */
673816853f9SBas Nieuwenhuizen 	return NULL;
674816853f9SBas Nieuwenhuizen }
675816853f9SBas Nieuwenhuizen 
6761331e630SBas Nieuwenhuizen 
/*
 * Tries to extract the renderable DCC offset from the opaque metadata attached
 * to the buffer.
 *
 * @adev:   device, used to pick the descriptor layout by GPU family
 * @obj:    GEM object whose opaque metadata is parsed
 * @offset: out parameter, receives the byte offset of the renderable DCC
 *
 * Returns 0 on success, or a negative errno if the BO cannot be reserved,
 * the metadata cannot be read, or the metadata is too small / has an
 * unexpected version.
 */
static int
extract_render_dcc_offset(struct amdgpu_device *adev,
			  struct drm_gem_object *obj,
			  uint64_t *offset)
{
	struct amdgpu_bo *rbo;
	int r = 0;
	uint32_t metadata[10]; /* Something that fits a descriptor + header. */
	uint32_t size;

	rbo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	r = amdgpu_bo_get_metadata(rbo, metadata, sizeof(metadata), &size, NULL);
	amdgpu_bo_unreserve(rbo);

	if (r)
		return r;

	/*
	 * The first word is the metadata version, and we need space for at least
	 * the version + pci vendor+device id + 8 words for a descriptor.
	 */
	if (size < 40  || metadata[0] != 1)
		return -EINVAL;

	/*
	 * The address is stored split across two descriptor words; the exact
	 * words and bit positions differ between pre-NV and NV+ families.
	 */
	if (adev->family >= AMDGPU_FAMILY_NV) {
		/* resource word 6/7 META_DATA_ADDRESS{_LO} */
		*offset = ((u64)metadata[9] << 16u) |
			  ((metadata[8] & 0xFF000000u) >> 16);
	} else {
		/* resource word 5/7 META_DATA_ADDRESS */
		*offset = ((u64)metadata[9] << 8u) |
			  ((u64)(metadata[7] & 0x1FE0000u) << 23);
	}

	return 0;
}
7261331e630SBas Nieuwenhuizen 
convert_tiling_flags_to_modifier_gfx12(struct amdgpu_framebuffer * afb)72796557f78SAurabindo Pillai static int convert_tiling_flags_to_modifier_gfx12(struct amdgpu_framebuffer *afb)
72896557f78SAurabindo Pillai {
72996557f78SAurabindo Pillai 	u64 modifier = 0;
730f340f2baSMarek Olšák 	int swizzle_mode = AMDGPU_TILING_GET(afb->tiling_flags, GFX12_SWIZZLE_MODE);
73196557f78SAurabindo Pillai 
732f340f2baSMarek Olšák 	if (!swizzle_mode) {
733f340f2baSMarek Olšák 		modifier = DRM_FORMAT_MOD_LINEAR;
734f340f2baSMarek Olšák 	} else {
735f340f2baSMarek Olšák 		int max_comp_block =
736f340f2baSMarek Olšák 			AMDGPU_TILING_GET(afb->tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);
73796557f78SAurabindo Pillai 
73896557f78SAurabindo Pillai 		modifier =
73996557f78SAurabindo Pillai 			AMD_FMT_MOD |
740f340f2baSMarek Olšák 			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX12) |
741f340f2baSMarek Olšák 			AMD_FMT_MOD_SET(TILE, swizzle_mode) |
742f340f2baSMarek Olšák 			AMD_FMT_MOD_SET(DCC, afb->gfx12_dcc) |
743f340f2baSMarek Olšák 			AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_comp_block);
744f340f2baSMarek Olšák 	}
74596557f78SAurabindo Pillai 
74696557f78SAurabindo Pillai 	afb->base.modifier = modifier;
74796557f78SAurabindo Pillai 	afb->base.flags |= DRM_MODE_FB_MODIFIERS;
74896557f78SAurabindo Pillai 	return 0;
74996557f78SAurabindo Pillai }
75096557f78SAurabindo Pillai 
/*
 * convert_tiling_flags_to_modifier - derive a DRM format modifier from the
 * pre-GFX12 hardware tiling flags stored on the framebuffer BO.
 *
 * Fills in afb->base.modifier and sets DRM_MODE_FB_MODIFIERS.  When DCC is
 * present it also fills in the extra plane offsets/pitches and swaps
 * afb->base.format for the matching multi-plane format description, so that
 * getfb2 and userspace see a complete modifier-based layout.
 *
 * Returns 0 on success, -EINVAL for swizzle modes that cannot be expressed
 * as a modifier or for inconsistent pipe/packer configuration.
 */
static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
{
	struct amdgpu_device *adev = drm_to_adev(afb->base.dev);
	uint64_t modifier = 0;
	int num_pipes = 0;
	int num_pkrs = 0;

	num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
	num_pipes = adev->gfx.config.gb_addr_config_fields.num_pipes;

	/* No tiling flags or a swizzle mode of 0 means a linear layout. */
	if (!afb->tiling_flags || !AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) {
		modifier = DRM_FORMAT_MOD_LINEAR;
	} else {
		int swizzle = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE);
		bool has_xor = swizzle >= 16;
		int block_size_bits;
		int version;
		int pipe_xor_bits = 0;
		int bank_xor_bits = 0;
		int packers = 0;
		int rb = 0;
		int pipes = ilog2(num_pipes);
		uint32_t dcc_offset = AMDGPU_TILING_GET(afb->tiling_flags, DCC_OFFSET_256B);

		/* Upper swizzle bits encode the tile block size. */
		switch (swizzle >> 2) {
		case 0: /* 256B */
			block_size_bits = 8;
			break;
		case 1: /* 4KiB */
		case 5: /* 4KiB _X */
			block_size_bits = 12;
			break;
		case 2: /* 64KiB */
		case 4: /* 64 KiB _T */
		case 6: /* 64 KiB _X */
			block_size_bits = 16;
			break;
		case 7: /* 256 KiB */
			block_size_bits = 18;
			break;
		default:
			/* RESERVED or VAR */
			return -EINVAL;
		}

		/* Pick the modifier tile version from the GC hardware IP. */
		if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0))
			version = AMD_FMT_MOD_TILE_VER_GFX11;
		else if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
			 IP_VERSION(10, 3, 0))
			version = AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
		else if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
			 IP_VERSION(10, 0, 0))
			version = AMD_FMT_MOD_TILE_VER_GFX10;
		else
			version = AMD_FMT_MOD_TILE_VER_GFX9;

		/*
		 * The low two swizzle bits encode the micro tile mode; some
		 * non-XOR modes only exist in the GFX9 modifier namespace on
		 * pre-GFX11 hardware.
		 */
		switch (swizzle & 3) {
		case 0: /* Z microtiling */
			return -EINVAL;
		case 1: /* S microtiling */
			if (amdgpu_ip_version(adev, GC_HWIP, 0) <
			    IP_VERSION(11, 0, 0)) {
				if (!has_xor)
					version = AMD_FMT_MOD_TILE_VER_GFX9;
			}
			break;
		case 2:
			if (amdgpu_ip_version(adev, GC_HWIP, 0) <
			    IP_VERSION(11, 0, 0)) {
				if (!has_xor && afb->base.format->cpp[0] != 4)
					version = AMD_FMT_MOD_TILE_VER_GFX9;
			}
			break;
		case 3:
			break;
		}

		/* XOR swizzle modes need the pipe/bank/packer XOR bit counts. */
		if (has_xor) {
			if (num_pipes == num_pkrs && num_pkrs == 0) {
				DRM_ERROR("invalid number of pipes and packers\n");
				return -EINVAL;
			}

			switch (version) {
			case AMD_FMT_MOD_TILE_VER_GFX11:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				packers = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
				break;
			case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				packers = min(block_size_bits - 8 - pipe_xor_bits,
					      ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs));
				break;
			case AMD_FMT_MOD_TILE_VER_GFX10:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				break;
			case AMD_FMT_MOD_TILE_VER_GFX9:
				rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
				     ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
				pipe_xor_bits = min(block_size_bits - 8, pipes +
						    ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
				bank_xor_bits = min(block_size_bits - 8 - pipe_xor_bits,
						    ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
				break;
			}
		}

		modifier = AMD_FMT_MOD |
			   AMD_FMT_MOD_SET(TILE, AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) |
			   AMD_FMT_MOD_SET(TILE_VERSION, version) |
			   AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			   AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			   AMD_FMT_MOD_SET(PACKERS, packers);

		if (dcc_offset != 0) {
			bool dcc_i64b = AMDGPU_TILING_GET(afb->tiling_flags, DCC_INDEPENDENT_64B) != 0;
			bool dcc_i128b = version >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
			const struct drm_format_info *format_info;
			u64 render_dcc_offset;

			/* Enable constant encode on RAVEN2 and later. */
			bool dcc_constant_encode =
				(adev->asic_type > CHIP_RAVEN ||
				 (adev->asic_type == CHIP_RAVEN &&
				  adev->external_rev_id >= 0x81)) &&
				amdgpu_ip_version(adev, GC_HWIP, 0) <
					IP_VERSION(11, 0, 0);

			int max_cblock_size = dcc_i64b ? AMD_FMT_MOD_DCC_BLOCK_64B :
					      dcc_i128b ? AMD_FMT_MOD_DCC_BLOCK_128B :
					      AMD_FMT_MOD_DCC_BLOCK_256B;

			modifier |= AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, dcc_constant_encode) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, dcc_i64b) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, dcc_i128b) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_cblock_size);

			/* DCC_OFFSET_256B is stored in units of 256 bytes. */
			afb->base.offsets[1] = dcc_offset * 256 + afb->base.offsets[0];
			afb->base.pitches[1] =
				AMDGPU_TILING_GET(afb->tiling_flags, DCC_PITCH_MAX) + 1;

			/*
			 * If the userspace driver uses retiling the tiling flags do not contain
			 * info on the renderable DCC buffer. Luckily the opaque metadata contains
			 * the info so we can try to extract it. The kernel does not use this info
			 * but we should convert it to a modifier plane for getfb2, so the
			 * userspace driver that gets it doesn't have to juggle around another DCC
			 * plane internally.
			 */
			if (extract_render_dcc_offset(adev, afb->base.obj[0],
						      &render_dcc_offset) == 0 &&
			    render_dcc_offset != 0 &&
			    render_dcc_offset != afb->base.offsets[1] &&
			    render_dcc_offset < UINT_MAX) {
				uint32_t dcc_block_bits;  /* of base surface data */

				modifier |= AMD_FMT_MOD_SET(DCC_RETILE, 1);
				afb->base.offsets[2] = render_dcc_offset;

				if (adev->family >= AMDGPU_FAMILY_NV) {
					int extra_pipe = 0;

					if ((amdgpu_ip_version(adev, GC_HWIP,
							       0) >=
					     IP_VERSION(10, 3, 0)) &&
					    pipes == packers && pipes > 1)
						extra_pipe = 1;

					dcc_block_bits = max(20, 16 + pipes + extra_pipe);
				} else {
					modifier |= AMD_FMT_MOD_SET(RB, rb) |
						    AMD_FMT_MOD_SET(PIPE, pipes);
					dcc_block_bits = max(20, 18 + rb);
				}

				dcc_block_bits -= ilog2(afb->base.format->cpp[0]);
				afb->base.pitches[2] = ALIGN(afb->base.width,
							     1u << ((dcc_block_bits + 1) / 2));
			}
			/* DCC needs a multi-plane format description. */
			format_info = amdgpu_lookup_format_info(afb->base.format->format,
								modifier);
			if (!format_info)
				return -EINVAL;

			afb->base.format = format_info;
		}
	}

	afb->base.modifier = modifier;
	afb->base.flags |= DRM_MODE_FB_MODIFIERS;
	return 0;
}
94408d76915SBas Nieuwenhuizen 
9452f350ddaSSimon Ser /* Mirrors the is_displayable check in radeonsi's gfx6_compute_surface */
check_tiling_flags_gfx6(struct amdgpu_framebuffer * afb)9462f350ddaSSimon Ser static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
9472f350ddaSSimon Ser {
9482f350ddaSSimon Ser 	u64 micro_tile_mode;
9492f350ddaSSimon Ser 
95011317d29SMarek Olšák 	if (AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) == 1) /* LINEAR_ALIGNED */
9512f350ddaSSimon Ser 		return 0;
9522f350ddaSSimon Ser 
9532f350ddaSSimon Ser 	micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
9542f350ddaSSimon Ser 	switch (micro_tile_mode) {
9552f350ddaSSimon Ser 	case 0: /* DISPLAY */
9562f350ddaSSimon Ser 	case 3: /* RENDER */
9572f350ddaSSimon Ser 		return 0;
9582f350ddaSSimon Ser 	default:
9592f350ddaSSimon Ser 		drm_dbg_kms(afb->base.dev,
9602f350ddaSSimon Ser 			    "Micro tile mode %llu not supported for scanout\n",
9612f350ddaSSimon Ser 			    micro_tile_mode);
9622f350ddaSSimon Ser 		return -EINVAL;
9632f350ddaSSimon Ser 	}
9642f350ddaSSimon Ser }
9652f350ddaSSimon Ser 
/*
 * Split a tile block of 1 << block_log2 bytes, holding pixels of @cpp bytes
 * each, into its width and height in pixels.  The block is made as close to
 * square as possible, with width taking the extra bit when the pixel count
 * is an odd power of two.
 */
static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
				 unsigned int *width, unsigned int *height)
{
	unsigned int pixel_log2 = block_log2 - ilog2(cpp);
	unsigned int width_log2 = (pixel_log2 + 1) / 2;

	*width = 1 << width_log2;
	*height = 1 << (pixel_log2 - width_log2);
}
977234055fdSBas Nieuwenhuizen 
/*
 * Log2 of the amount of base-surface data covered by one DCC block, for the
 * tile version encoded in @modifier.  Returns 0 for unknown tile versions.
 */
static unsigned int get_dcc_block_size(uint64_t modifier, bool rb_aligned,
				       bool pipe_aligned)
{
	unsigned int ver = AMD_FMT_MOD_GET(TILE_VERSION, modifier);

	if (ver == AMD_FMT_MOD_TILE_VER_GFX9) {
		/*
		 * TODO: for pipe aligned we may need to check the alignment of the
		 * total size of the surface, which may need to be bigger than the
		 * natural alignment due to some HW workarounds
		 */
		return max(10 + (rb_aligned ? (int)AMD_FMT_MOD_GET(RB, modifier) : 0), 12);
	}

	if (ver == AMD_FMT_MOD_TILE_VER_GFX10 ||
	    ver == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS ||
	    ver == AMD_FMT_MOD_TILE_VER_GFX11) {
		int pipes_log2 = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);

		/* On RB+ parts with as many packers as pipes, one more pipe bit. */
		if (ver >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS && pipes_log2 > 1 &&
		    AMD_FMT_MOD_GET(PACKERS, modifier) == pipes_log2)
			++pipes_log2;

		return max(8 + (pipe_aligned ? pipes_log2 : 0), 12);
	}

	return 0;
}
1007234055fdSBas Nieuwenhuizen 
amdgpu_display_verify_plane(struct amdgpu_framebuffer * rfb,int plane,const struct drm_format_info * format,unsigned int block_width,unsigned int block_height,unsigned int block_size_log2)1008234055fdSBas Nieuwenhuizen static int amdgpu_display_verify_plane(struct amdgpu_framebuffer *rfb, int plane,
1009234055fdSBas Nieuwenhuizen 				       const struct drm_format_info *format,
1010234055fdSBas Nieuwenhuizen 				       unsigned int block_width, unsigned int block_height,
1011234055fdSBas Nieuwenhuizen 				       unsigned int block_size_log2)
1012234055fdSBas Nieuwenhuizen {
1013234055fdSBas Nieuwenhuizen 	unsigned int width = rfb->base.width /
1014234055fdSBas Nieuwenhuizen 		((plane && plane < format->num_planes) ? format->hsub : 1);
1015234055fdSBas Nieuwenhuizen 	unsigned int height = rfb->base.height /
1016234055fdSBas Nieuwenhuizen 		((plane && plane < format->num_planes) ? format->vsub : 1);
1017234055fdSBas Nieuwenhuizen 	unsigned int cpp = plane < format->num_planes ? format->cpp[plane] : 1;
1018234055fdSBas Nieuwenhuizen 	unsigned int block_pitch = block_width * cpp;
1019234055fdSBas Nieuwenhuizen 	unsigned int min_pitch = ALIGN(width * cpp, block_pitch);
1020234055fdSBas Nieuwenhuizen 	unsigned int block_size = 1 << block_size_log2;
1021234055fdSBas Nieuwenhuizen 	uint64_t size;
1022234055fdSBas Nieuwenhuizen 
1023234055fdSBas Nieuwenhuizen 	if (rfb->base.pitches[plane] % block_pitch) {
1024234055fdSBas Nieuwenhuizen 		drm_dbg_kms(rfb->base.dev,
1025234055fdSBas Nieuwenhuizen 			    "pitch %d for plane %d is not a multiple of block pitch %d\n",
1026234055fdSBas Nieuwenhuizen 			    rfb->base.pitches[plane], plane, block_pitch);
1027234055fdSBas Nieuwenhuizen 		return -EINVAL;
1028234055fdSBas Nieuwenhuizen 	}
1029234055fdSBas Nieuwenhuizen 	if (rfb->base.pitches[plane] < min_pitch) {
1030234055fdSBas Nieuwenhuizen 		drm_dbg_kms(rfb->base.dev,
1031234055fdSBas Nieuwenhuizen 			    "pitch %d for plane %d is less than minimum pitch %d\n",
1032234055fdSBas Nieuwenhuizen 			    rfb->base.pitches[plane], plane, min_pitch);
1033234055fdSBas Nieuwenhuizen 		return -EINVAL;
1034234055fdSBas Nieuwenhuizen 	}
1035234055fdSBas Nieuwenhuizen 
1036234055fdSBas Nieuwenhuizen 	/* Force at least natural alignment. */
1037234055fdSBas Nieuwenhuizen 	if (rfb->base.offsets[plane] % block_size) {
1038234055fdSBas Nieuwenhuizen 		drm_dbg_kms(rfb->base.dev,
1039234055fdSBas Nieuwenhuizen 			    "offset 0x%x for plane %d is not a multiple of block pitch 0x%x\n",
1040234055fdSBas Nieuwenhuizen 			    rfb->base.offsets[plane], plane, block_size);
1041234055fdSBas Nieuwenhuizen 		return -EINVAL;
1042234055fdSBas Nieuwenhuizen 	}
1043234055fdSBas Nieuwenhuizen 
1044234055fdSBas Nieuwenhuizen 	size = rfb->base.offsets[plane] +
1045234055fdSBas Nieuwenhuizen 		(uint64_t)rfb->base.pitches[plane] / block_pitch *
1046234055fdSBas Nieuwenhuizen 		block_size * DIV_ROUND_UP(height, block_height);
1047234055fdSBas Nieuwenhuizen 
1048234055fdSBas Nieuwenhuizen 	if (rfb->base.obj[0]->size < size) {
1049234055fdSBas Nieuwenhuizen 		drm_dbg_kms(rfb->base.dev,
1050234055fdSBas Nieuwenhuizen 			    "BO size 0x%zx is less than 0x%llx required for plane %d\n",
1051234055fdSBas Nieuwenhuizen 			    rfb->base.obj[0]->size, size, plane);
1052234055fdSBas Nieuwenhuizen 		return -EINVAL;
1053234055fdSBas Nieuwenhuizen 	}
1054234055fdSBas Nieuwenhuizen 
1055234055fdSBas Nieuwenhuizen 	return 0;
1056234055fdSBas Nieuwenhuizen }
1057234055fdSBas Nieuwenhuizen 
1058234055fdSBas Nieuwenhuizen 
/*
 * amdgpu_display_verify_sizes - validate FB plane layout against the modifier.
 *
 * Derives the tile-block geometry implied by the framebuffer's format
 * modifier and checks every plane's pitch, offset and total size via
 * amdgpu_display_verify_plane().  For gfx9-gfx11 DCC modifiers the extra
 * metadata plane(s) are verified as well.
 *
 * Returns 0 on success, -EINVAL if any plane violates the layout rules.
 */
static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
{
	const struct drm_format_info *format_info = drm_format_info(rfb->base.format->format);
	uint64_t modifier = rfb->base.modifier;
	int ret;
	unsigned int i, block_width, block_height, block_size_log2;

	/* Without modifier support there is no block geometry to check. */
	if (rfb->base.dev->mode_config.fb_modifiers_not_supported)
		return 0;

	for (i = 0; i < format_info->num_planes; ++i) {
		if (modifier == DRM_FORMAT_MOD_LINEAR) {
			/* Linear layout: 256-byte blocks, one pixel row high. */
			block_width = 256 / format_info->cpp[i];
			block_height = 1;
			block_size_log2 = 8;
		} else if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX12) {
			/* Gfx12+: block size follows directly from the 2D swizzle mode. */
			int swizzle = AMD_FMT_MOD_GET(TILE, modifier);

			switch (swizzle) {
			case AMD_FMT_MOD_TILE_GFX12_256B_2D:
				block_size_log2 = 8;
				break;
			case AMD_FMT_MOD_TILE_GFX12_4K_2D:
				block_size_log2 = 12;
				break;
			case AMD_FMT_MOD_TILE_GFX12_64K_2D:
				block_size_log2 = 16;
				break;
			case AMD_FMT_MOD_TILE_GFX12_256K_2D:
				block_size_log2 = 18;
				break;
			default:
				drm_dbg_kms(rfb->base.dev,
					    "Gfx12 swizzle mode with unknown block size: %d\n", swizzle);
				return -EINVAL;
			}

			get_block_dimensions(block_size_log2, format_info->cpp[i],
					     &block_width, &block_height);
		} else {
			/* Pre-gfx12: collapse swizzle-mode variants to their DC_SW_* family. */
			int swizzle = AMD_FMT_MOD_GET(TILE, modifier);

			switch ((swizzle & ~3) + 1) {
			case DC_SW_256B_S:
				block_size_log2 = 8;
				break;
			case DC_SW_4KB_S:
			case DC_SW_4KB_S_X:
				block_size_log2 = 12;
				break;
			case DC_SW_64KB_S:
			case DC_SW_64KB_S_T:
			case DC_SW_64KB_S_X:
				block_size_log2 = 16;
				break;
			case DC_SW_VAR_S_X:
				block_size_log2 = 18;
				break;
			default:
				drm_dbg_kms(rfb->base.dev,
					    "Swizzle mode with unknown block size: %d\n", swizzle);
				return -EINVAL;
			}

			get_block_dimensions(block_size_log2, format_info->cpp[i],
					     &block_width, &block_height);
		}

		ret = amdgpu_display_verify_plane(rfb, i, format_info,
						  block_width, block_height, block_size_log2);
		if (ret)
			return ret;
	}

	/* Gfx9-gfx11 DCC modifiers carry additional metadata plane(s). */
	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) <= AMD_FMT_MOD_TILE_VER_GFX11 &&
	    AMD_FMT_MOD_GET(DCC, modifier)) {
		if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
			/* Retiled DCC: verify the displayable DCC plane first,
			 * then advance i to the pipe-aligned DCC plane below.
			 */
			block_size_log2 = get_dcc_block_size(modifier, false, false);
			get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
					     &block_width, &block_height);
			ret = amdgpu_display_verify_plane(rfb, i, format_info,
							  block_width, block_height,
							  block_size_log2);
			if (ret)
				return ret;

			++i;
			block_size_log2 = get_dcc_block_size(modifier, true, true);
		} else {
			bool pipe_aligned = AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier);

			block_size_log2 = get_dcc_block_size(modifier, true, pipe_aligned);
		}
		get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
				     &block_width, &block_height);
		ret = amdgpu_display_verify_plane(rfb, i, format_info,
						  block_width, block_height, block_size_log2);
		if (ret)
			return ret;
	}

	return 0;
}
1162234055fdSBas Nieuwenhuizen 
/*
 * amdgpu_display_get_fb_info - read per-BO scanout attributes of a framebuffer.
 *
 * Fills @tiling_flags, @tmz_surface and @gfx12_dcc from the framebuffer's
 * backing BO (reserving it for the duration of the read).  A NULL @amdgpu_fb
 * yields all-default outputs and success.
 *
 * Returns 0 on success or a negative errno if the BO cannot be reserved.
 */
static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
				      uint64_t *tiling_flags, bool *tmz_surface,
				      bool *gfx12_dcc)
{
	struct amdgpu_bo *bo;
	int ret;

	if (!amdgpu_fb) {
		*tiling_flags = 0;
		*tmz_surface = false;
		*gfx12_dcc = false;
		return 0;
	}

	bo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);

	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", ret);
		return ret;
	}

	amdgpu_bo_get_tiling_flags(bo, tiling_flags);
	*tmz_surface = amdgpu_bo_encrypted(bo);
	*gfx12_dcc = bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC;

	amdgpu_bo_unreserve(bo);

	return 0;
}
11956eed95b0SBas Nieuwenhuizen 
amdgpu_display_gem_fb_verify_and_init(struct drm_device * dev,struct amdgpu_framebuffer * rfb,struct drm_file * file_priv,const struct drm_mode_fb_cmd2 * mode_cmd,struct drm_gem_object * obj)1196c5b26681SAlex Deucher static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
1197f258907fSMark Yacoub 						 struct amdgpu_framebuffer *rfb,
1198c5b26681SAlex Deucher 						 struct drm_file *file_priv,
1199f258907fSMark Yacoub 						 const struct drm_mode_fb_cmd2 *mode_cmd,
1200f258907fSMark Yacoub 						 struct drm_gem_object *obj)
1201f258907fSMark Yacoub {
1202f258907fSMark Yacoub 	int ret;
1203f258907fSMark Yacoub 
1204f258907fSMark Yacoub 	rfb->base.obj[0] = obj;
1205f258907fSMark Yacoub 	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
1206fe180178SQingqing Zhuo 	/* Verify that the modifier is supported. */
1207fe180178SQingqing Zhuo 	if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
1208fe180178SQingqing Zhuo 				      mode_cmd->modifier[0])) {
1209fe180178SQingqing Zhuo 		drm_dbg_kms(dev,
12105a6af54dSThomas Zimmermann 			    "unsupported pixel format %p4cc / modifier 0x%llx\n",
12115a6af54dSThomas Zimmermann 			    &mode_cmd->pixel_format, mode_cmd->modifier[0]);
1212fe180178SQingqing Zhuo 
1213fe180178SQingqing Zhuo 		ret = -EINVAL;
1214fe180178SQingqing Zhuo 		goto err;
1215fe180178SQingqing Zhuo 	}
1216f258907fSMark Yacoub 
1217f258907fSMark Yacoub 	ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
1218f258907fSMark Yacoub 	if (ret)
1219f258907fSMark Yacoub 		goto err;
1220f258907fSMark Yacoub 
12211c6b6bd0SHamza Mahfooz 	if (drm_drv_uses_atomic_modeset(dev))
12221c6b6bd0SHamza Mahfooz 		ret = drm_framebuffer_init(dev, &rfb->base,
12231c6b6bd0SHamza Mahfooz 					   &amdgpu_fb_funcs_atomic);
12241c6b6bd0SHamza Mahfooz 	else
122524981fa3SMichel Dänzer 		ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
122617d819e2SHamza Mahfooz 
122724981fa3SMichel Dänzer 	if (ret)
122824981fa3SMichel Dänzer 		goto err;
122924981fa3SMichel Dänzer 
1230f258907fSMark Yacoub 	return 0;
1231f258907fSMark Yacoub err:
123232d6378cSMichel Dänzer 	drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);
1233f258907fSMark Yacoub 	rfb->base.obj[0] = NULL;
1234f258907fSMark Yacoub 	return ret;
1235f258907fSMark Yacoub }
1236f258907fSMark Yacoub 
amdgpu_display_framebuffer_init(struct drm_device * dev,struct amdgpu_framebuffer * rfb,const struct drm_mode_fb_cmd2 * mode_cmd,struct drm_gem_object * obj)123731d5c523SAlex Deucher static int amdgpu_display_framebuffer_init(struct drm_device *dev,
1238d38ceaf9SAlex Deucher 					   struct amdgpu_framebuffer *rfb,
12391eb83451SVille Syrjälä 					   const struct drm_mode_fb_cmd2 *mode_cmd,
1240d38ceaf9SAlex Deucher 					   struct drm_gem_object *obj)
1241d38ceaf9SAlex Deucher {
12422f350ddaSSimon Ser 	struct amdgpu_device *adev = drm_to_adev(dev);
12433505b2ffSBas Nieuwenhuizen 	int ret, i;
12446eed95b0SBas Nieuwenhuizen 
12453505b2ffSBas Nieuwenhuizen 	/*
12463505b2ffSBas Nieuwenhuizen 	 * This needs to happen before modifier conversion as that might change
12473505b2ffSBas Nieuwenhuizen 	 * the number of planes.
12483505b2ffSBas Nieuwenhuizen 	 */
12493505b2ffSBas Nieuwenhuizen 	for (i = 1; i < rfb->base.format->num_planes; ++i) {
12503505b2ffSBas Nieuwenhuizen 		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
1251ccac8babSSimon Ser 			drm_dbg_kms(dev, "Plane 0 and %d have different BOs: %u vs. %u\n",
12523505b2ffSBas Nieuwenhuizen 				    i, mode_cmd->handles[0], mode_cmd->handles[i]);
12533505b2ffSBas Nieuwenhuizen 			ret = -EINVAL;
1254f258907fSMark Yacoub 			return ret;
12553505b2ffSBas Nieuwenhuizen 		}
12563505b2ffSBas Nieuwenhuizen 	}
12573505b2ffSBas Nieuwenhuizen 
12580d3157d0SMarek Olšák 	ret = amdgpu_display_get_fb_info(rfb, &rfb->tiling_flags, &rfb->tmz_surface,
12590d3157d0SMarek Olšák 					 &rfb->gfx12_dcc);
12606eed95b0SBas Nieuwenhuizen 	if (ret)
1261f258907fSMark Yacoub 		return ret;
12626eed95b0SBas Nieuwenhuizen 
12636c64ae22SDave Airlie 	if (dev->mode_config.fb_modifiers_not_supported && !adev->enable_virtual_display) {
12642f350ddaSSimon Ser 		drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
12652f350ddaSSimon Ser 			      "GFX9+ requires FB check based on format modifier\n");
12662f350ddaSSimon Ser 		ret = check_tiling_flags_gfx6(rfb);
12672f350ddaSSimon Ser 		if (ret)
12682f350ddaSSimon Ser 			return ret;
12692f350ddaSSimon Ser 	}
12702f350ddaSSimon Ser 
12712af10429STomohito Esaki 	if (!dev->mode_config.fb_modifiers_not_supported &&
127208d76915SBas Nieuwenhuizen 	    !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
1273f340f2baSMarek Olšák 		if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0))
1274f340f2baSMarek Olšák 			ret = convert_tiling_flags_to_modifier_gfx12(rfb);
1275f340f2baSMarek Olšák 		else
127608d76915SBas Nieuwenhuizen 			ret = convert_tiling_flags_to_modifier(rfb);
1277f340f2baSMarek Olšák 
1278048faf27SSimon Ser 		if (ret) {
1279048faf27SSimon Ser 			drm_dbg_kms(dev, "Failed to convert tiling flags 0x%llX to a modifier",
1280048faf27SSimon Ser 				    rfb->tiling_flags);
1281f258907fSMark Yacoub 			return ret;
128208d76915SBas Nieuwenhuizen 		}
1283048faf27SSimon Ser 	}
128408d76915SBas Nieuwenhuizen 
1285234055fdSBas Nieuwenhuizen 	ret = amdgpu_display_verify_sizes(rfb);
1286234055fdSBas Nieuwenhuizen 	if (ret)
1287234055fdSBas Nieuwenhuizen 		return ret;
1288234055fdSBas Nieuwenhuizen 
1289234055fdSBas Nieuwenhuizen 	for (i = 0; i < rfb->base.format->num_planes; ++i) {
129079fcd446Sxinhui pan 		drm_gem_object_get(rfb->base.obj[0]);
12913505b2ffSBas Nieuwenhuizen 		rfb->base.obj[i] = rfb->base.obj[0];
12923505b2ffSBas Nieuwenhuizen 	}
12933505b2ffSBas Nieuwenhuizen 
12946eed95b0SBas Nieuwenhuizen 	return 0;
1295d38ceaf9SAlex Deucher }
1296d38ceaf9SAlex Deucher 
1297b0fb632fSHarry Wentland struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device * dev,struct drm_file * file_priv,const struct drm_mode_fb_cmd2 * mode_cmd)12984d4772f6SSamuel Li amdgpu_display_user_framebuffer_create(struct drm_device *dev,
1299d38ceaf9SAlex Deucher 				       struct drm_file *file_priv,
13001eb83451SVille Syrjälä 				       const struct drm_mode_fb_cmd2 *mode_cmd)
1301d38ceaf9SAlex Deucher {
1302d38ceaf9SAlex Deucher 	struct amdgpu_framebuffer *amdgpu_fb;
1303dd017d01SChristian König 	struct drm_gem_object *obj;
1304dd017d01SChristian König 	struct amdgpu_bo *bo;
1305dd017d01SChristian König 	uint32_t domains;
1306d38ceaf9SAlex Deucher 	int ret;
1307d38ceaf9SAlex Deucher 
1308a8ad0bd8SChris Wilson 	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
1309d38ceaf9SAlex Deucher 	if (obj ==  NULL) {
131093125cb7SSrinivasan Shanmugam 		drm_dbg_kms(dev,
131193125cb7SSrinivasan Shanmugam 			    "No GEM object associated to handle 0x%08X, can't create framebuffer\n",
131293125cb7SSrinivasan Shanmugam 			    mode_cmd->handles[0]);
131393125cb7SSrinivasan Shanmugam 
1314d38ceaf9SAlex Deucher 		return ERR_PTR(-ENOENT);
1315d38ceaf9SAlex Deucher 	}
1316d38ceaf9SAlex Deucher 
13171769152aSChristopher James Halse Rogers 	/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
1318dd017d01SChristian König 	bo = gem_to_amdgpu_bo(obj);
1319dd017d01SChristian König 	domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
1320dd017d01SChristian König 	if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
1321ccac8babSSimon Ser 		drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
1322e0c16eb4SSimon Ser 		drm_gem_object_put(obj);
13231769152aSChristopher James Halse Rogers 		return ERR_PTR(-EINVAL);
13241769152aSChristopher James Halse Rogers 	}
13251769152aSChristopher James Halse Rogers 
1326d38ceaf9SAlex Deucher 	amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
1327d38ceaf9SAlex Deucher 	if (amdgpu_fb == NULL) {
1328e07ddb0cSEmil Velikov 		drm_gem_object_put(obj);
1329d38ceaf9SAlex Deucher 		return ERR_PTR(-ENOMEM);
1330d38ceaf9SAlex Deucher 	}
1331d38ceaf9SAlex Deucher 
1332f258907fSMark Yacoub 	ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv,
1333f258907fSMark Yacoub 						    mode_cmd, obj);
1334d38ceaf9SAlex Deucher 	if (ret) {
1335d38ceaf9SAlex Deucher 		kfree(amdgpu_fb);
1336e07ddb0cSEmil Velikov 		drm_gem_object_put(obj);
1337d38ceaf9SAlex Deucher 		return ERR_PTR(ret);
1338d38ceaf9SAlex Deucher 	}
1339d38ceaf9SAlex Deucher 
134079fcd446Sxinhui pan 	drm_gem_object_put(obj);
1341d38ceaf9SAlex Deucher 	return &amdgpu_fb->base;
1342d38ceaf9SAlex Deucher }
1343d38ceaf9SAlex Deucher 
/* Mode-config hooks; only framebuffer creation is driver-specific here. */
const struct drm_mode_config_funcs amdgpu_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
};
1347d38ceaf9SAlex Deucher 
/* Values for the connector "underscan" property. */
static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] = {
	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};

/* Values for the connector "audio" property. */
static const struct drm_prop_enum_list amdgpu_audio_enum_list[] = {
	{ AMDGPU_AUDIO_DISABLE, "off" },
	{ AMDGPU_AUDIO_ENABLE, "on" },
	{ AMDGPU_AUDIO_AUTO, "auto" },
};

/* XXX support different dither options? spatial, temporal, both, etc. */
/* Values for the connector "dither" property. */
static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = {
	{ AMDGPU_FMT_DITHER_DISABLE, "off" },
	{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};
1365d38ceaf9SAlex Deucher 
amdgpu_display_modeset_create_props(struct amdgpu_device * adev)13663dc9b1ceSSamuel Li int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
1367d38ceaf9SAlex Deucher {
1368d38ceaf9SAlex Deucher 	int sz;
1369d38ceaf9SAlex Deucher 
1370d38ceaf9SAlex Deucher 	adev->mode_info.coherent_mode_property =
13714a580877SLuben Tuikov 		drm_property_create_range(adev_to_drm(adev), 0, "coherent", 0, 1);
1372d38ceaf9SAlex Deucher 	if (!adev->mode_info.coherent_mode_property)
1373d38ceaf9SAlex Deucher 		return -ENOMEM;
1374d38ceaf9SAlex Deucher 
1375d38ceaf9SAlex Deucher 	adev->mode_info.load_detect_property =
13764a580877SLuben Tuikov 		drm_property_create_range(adev_to_drm(adev), 0, "load detection", 0, 1);
1377d38ceaf9SAlex Deucher 	if (!adev->mode_info.load_detect_property)
1378d38ceaf9SAlex Deucher 		return -ENOMEM;
1379d38ceaf9SAlex Deucher 
13804a580877SLuben Tuikov 	drm_mode_create_scaling_mode_property(adev_to_drm(adev));
1381d38ceaf9SAlex Deucher 
1382d38ceaf9SAlex Deucher 	sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
1383d38ceaf9SAlex Deucher 	adev->mode_info.underscan_property =
13844a580877SLuben Tuikov 		drm_property_create_enum(adev_to_drm(adev), 0,
1385d38ceaf9SAlex Deucher 					 "underscan",
1386d38ceaf9SAlex Deucher 					 amdgpu_underscan_enum_list, sz);
1387d38ceaf9SAlex Deucher 
1388d38ceaf9SAlex Deucher 	adev->mode_info.underscan_hborder_property =
13894a580877SLuben Tuikov 		drm_property_create_range(adev_to_drm(adev), 0,
1390d38ceaf9SAlex Deucher 					  "underscan hborder", 0, 128);
1391d38ceaf9SAlex Deucher 	if (!adev->mode_info.underscan_hborder_property)
1392d38ceaf9SAlex Deucher 		return -ENOMEM;
1393d38ceaf9SAlex Deucher 
1394d38ceaf9SAlex Deucher 	adev->mode_info.underscan_vborder_property =
13954a580877SLuben Tuikov 		drm_property_create_range(adev_to_drm(adev), 0,
1396d38ceaf9SAlex Deucher 					  "underscan vborder", 0, 128);
1397d38ceaf9SAlex Deucher 	if (!adev->mode_info.underscan_vborder_property)
1398d38ceaf9SAlex Deucher 		return -ENOMEM;
1399d38ceaf9SAlex Deucher 
1400d38ceaf9SAlex Deucher 	sz = ARRAY_SIZE(amdgpu_audio_enum_list);
1401d38ceaf9SAlex Deucher 	adev->mode_info.audio_property =
14024a580877SLuben Tuikov 		drm_property_create_enum(adev_to_drm(adev), 0,
1403d38ceaf9SAlex Deucher 					 "audio",
1404d38ceaf9SAlex Deucher 					 amdgpu_audio_enum_list, sz);
1405d38ceaf9SAlex Deucher 
1406d38ceaf9SAlex Deucher 	sz = ARRAY_SIZE(amdgpu_dither_enum_list);
1407d38ceaf9SAlex Deucher 	adev->mode_info.dither_property =
14084a580877SLuben Tuikov 		drm_property_create_enum(adev_to_drm(adev), 0,
1409d38ceaf9SAlex Deucher 					 "dither",
1410d38ceaf9SAlex Deucher 					 amdgpu_dither_enum_list, sz);
1411d38ceaf9SAlex Deucher 
1412d38ceaf9SAlex Deucher 	return 0;
1413d38ceaf9SAlex Deucher }
1414d38ceaf9SAlex Deucher 
amdgpu_display_update_priority(struct amdgpu_device * adev)1415166140fbSSamuel Li void amdgpu_display_update_priority(struct amdgpu_device *adev)
1416d38ceaf9SAlex Deucher {
1417d38ceaf9SAlex Deucher 	/* adjustment options for the display watermarks */
1418d38ceaf9SAlex Deucher 	if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
1419d38ceaf9SAlex Deucher 		adev->mode_info.disp_priority = 0;
1420d38ceaf9SAlex Deucher 	else
1421d38ceaf9SAlex Deucher 		adev->mode_info.disp_priority = amdgpu_disp_priority;
1422d38ceaf9SAlex Deucher 
1423d38ceaf9SAlex Deucher }
1424d38ceaf9SAlex Deucher 
amdgpu_display_is_hdtv_mode(const struct drm_display_mode * mode)14253a05dc00SSamuel Li static bool amdgpu_display_is_hdtv_mode(const struct drm_display_mode *mode)
1426d38ceaf9SAlex Deucher {
1427d38ceaf9SAlex Deucher 	/* try and guess if this is a tv or a monitor */
1428d38ceaf9SAlex Deucher 	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
1429d38ceaf9SAlex Deucher 	    (mode->vdisplay == 576) || /* 576p */
1430d38ceaf9SAlex Deucher 	    (mode->vdisplay == 720) || /* 720p */
1431d38ceaf9SAlex Deucher 	    (mode->vdisplay == 1080)) /* 1080p */
1432d38ceaf9SAlex Deucher 		return true;
1433d38ceaf9SAlex Deucher 	else
1434d38ceaf9SAlex Deucher 		return false;
1435d38ceaf9SAlex Deucher }
1436d38ceaf9SAlex Deucher 
/*
 * amdgpu_display_crtc_scaling_mode_fixup - derive CRTC scaling/underscan state.
 *
 * For the encoder driving @crtc, picks the RMX scaling type, copies the
 * encoder's native mode, computes underscan borders for HDMI TV modes and
 * fills amdgpu_crtc->vsc/hsc with the vertical/horizontal scale factors
 * (fixed-point src/dst ratios).  Always returns true.
 */
bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
					const struct drm_display_mode *mode,
					struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector *connector;
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	amdgpu_crtc->h_border = 0;
	amdgpu_crtc->v_border = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		/* Only the encoder(s) currently attached to this CRTC matter. */
		if (encoder->crtc != crtc)
			continue;
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		connector = amdgpu_get_connector_for_encoder(encoder);

		/* set scaling */
		if (amdgpu_encoder->rmx_type == RMX_OFF)
			amdgpu_crtc->rmx_type = RMX_OFF;
		else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay ||
			 mode->vdisplay < amdgpu_encoder->native_mode.vdisplay)
			amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type;
		else
			amdgpu_crtc->rmx_type = RMX_OFF;
		/* copy native mode */
		memcpy(&amdgpu_crtc->native_mode,
		       &amdgpu_encoder->native_mode,
		       sizeof(struct drm_display_mode));
		src_v = crtc->mode.vdisplay;
		dst_v = amdgpu_crtc->native_mode.vdisplay;
		src_h = crtc->mode.hdisplay;
		dst_h = amdgpu_crtc->native_mode.hdisplay;

		/* fix up for overscan on hdmi */
		if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
		    ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
		     ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
		      connector && connector->display_info.is_hdmi &&
		      amdgpu_display_is_hdtv_mode(mode)))) {
			/* Explicit border if configured, otherwise ~6% + 16 lines/pixels. */
			if (amdgpu_encoder->underscan_hborder != 0)
				amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
			else
				amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16;
			if (amdgpu_encoder->underscan_vborder != 0)
				amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder;
			else
				amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16;
			amdgpu_crtc->rmx_type = RMX_FULL;
			/* Underscan scales the full mode into the bordered area. */
			src_v = crtc->mode.vdisplay;
			dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2);
			src_h = crtc->mode.hdisplay;
			dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2);
		}
	}
	if (amdgpu_crtc->rmx_type != RMX_OFF) {
		/* Fixed-point scale factors: src / dst for each axis. */
		fixed20_12 a, b;

		a.full = dfixed_const(src_v);
		b.full = dfixed_const(dst_v);
		amdgpu_crtc->vsc.full = dfixed_div(a, b);
		a.full = dfixed_const(src_h);
		b.full = dfixed_const(dst_h);
		amdgpu_crtc->hsc.full = dfixed_div(a, b);
	} else {
		/* No scaling: 1:1 on both axes. */
		amdgpu_crtc->vsc.full = dfixed_const(1);
		amdgpu_crtc->hsc.full = dfixed_const(1);
	}
	return true;
}
1511d38ceaf9SAlex Deucher 
1512d38ceaf9SAlex Deucher /*
1513d38ceaf9SAlex Deucher  * Retrieve current video scanout position of crtc on a given gpu, and
1514d38ceaf9SAlex Deucher  * an optional accurate timestamp of when query happened.
1515d38ceaf9SAlex Deucher  *
1516d38ceaf9SAlex Deucher  * \param dev Device to query.
151788e72717SThierry Reding  * \param pipe Crtc to query.
151893125cb7SSrinivasan Shanmugam  * \param flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
15198e36f9d3SAlex Deucher  *              For driver internal use only also supports these flags:
15208e36f9d3SAlex Deucher  *
15218e36f9d3SAlex Deucher  *              USE_REAL_VBLANKSTART to use the real start of vblank instead
15228e36f9d3SAlex Deucher  *              of a fudged earlier start of vblank.
15238e36f9d3SAlex Deucher  *
15248e36f9d3SAlex Deucher  *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
15258e36f9d3SAlex Deucher  *              fudged earlier start of vblank in *vpos and the distance
15268e36f9d3SAlex Deucher  *              to true start of vblank in *hpos.
15278e36f9d3SAlex Deucher  *
1528d38ceaf9SAlex Deucher  * \param *vpos Location where vertical scanout position should be stored.
1529d38ceaf9SAlex Deucher  * \param *hpos Location where horizontal scanout position should go.
1530d38ceaf9SAlex Deucher  * \param *stime Target location for timestamp taken immediately before
1531d38ceaf9SAlex Deucher  *               scanout position query. Can be NULL to skip timestamp.
1532d38ceaf9SAlex Deucher  * \param *etime Target location for timestamp taken immediately after
1533d38ceaf9SAlex Deucher  *               scanout position query. Can be NULL to skip timestamp.
1534d38ceaf9SAlex Deucher  *
1535d38ceaf9SAlex Deucher  * Returns vpos as a positive number while in active scanout area.
1536d38ceaf9SAlex Deucher  * Returns vpos as a negative number inside vblank, counting the number
1537d38ceaf9SAlex Deucher  * of scanlines to go until end of vblank, e.g., -1 means "one scanline
1538d38ceaf9SAlex Deucher  * until start of active scanout / end of vblank."
1539d38ceaf9SAlex Deucher  *
1540d38ceaf9SAlex Deucher  * \return Flags, or'ed together as follows:
1541d38ceaf9SAlex Deucher  *
1542d38ceaf9SAlex Deucher  * DRM_SCANOUTPOS_VALID = Query successful.
 * DRM_SCANOUTPOS_IN_VBLANK = Inside vblank.
1544d38ceaf9SAlex Deucher  * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
1545d38ceaf9SAlex Deucher  * this flag means that returned position may be offset by a constant but
1546d38ceaf9SAlex Deucher  * unknown small number of scanlines wrt. real scanout position.
1547d38ceaf9SAlex Deucher  *
1548d38ceaf9SAlex Deucher  */
amdgpu_display_get_crtc_scanoutpos(struct drm_device * dev,unsigned int pipe,unsigned int flags,int * vpos,int * hpos,ktime_t * stime,ktime_t * etime,const struct drm_display_mode * mode)1549aa8e286aSSamuel Li int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
1550aa8e286aSSamuel Li 			unsigned int pipe, unsigned int flags, int *vpos,
1551aa8e286aSSamuel Li 			int *hpos, ktime_t *stime, ktime_t *etime,
15523bb403bfSVille Syrjälä 			const struct drm_display_mode *mode)
1553d38ceaf9SAlex Deucher {
1554d38ceaf9SAlex Deucher 	u32 vbl = 0, position = 0;
1555d38ceaf9SAlex Deucher 	int vbl_start, vbl_end, vtotal, ret = 0;
1556d38ceaf9SAlex Deucher 	bool in_vbl = true;
1557d38ceaf9SAlex Deucher 
15581348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(dev);
1559d38ceaf9SAlex Deucher 
1560d38ceaf9SAlex Deucher 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
1561d38ceaf9SAlex Deucher 
1562d38ceaf9SAlex Deucher 	/* Get optional system timestamp before query. */
1563d38ceaf9SAlex Deucher 	if (stime)
1564d38ceaf9SAlex Deucher 		*stime = ktime_get();
1565d38ceaf9SAlex Deucher 
156688e72717SThierry Reding 	if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0)
1567d38ceaf9SAlex Deucher 		ret |= DRM_SCANOUTPOS_VALID;
1568d38ceaf9SAlex Deucher 
1569d38ceaf9SAlex Deucher 	/* Get optional system timestamp after query. */
1570d38ceaf9SAlex Deucher 	if (etime)
1571d38ceaf9SAlex Deucher 		*etime = ktime_get();
1572d38ceaf9SAlex Deucher 
1573d38ceaf9SAlex Deucher 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
1574d38ceaf9SAlex Deucher 
1575d38ceaf9SAlex Deucher 	/* Decode into vertical and horizontal scanout position. */
1576d38ceaf9SAlex Deucher 	*vpos = position & 0x1fff;
1577d38ceaf9SAlex Deucher 	*hpos = (position >> 16) & 0x1fff;
1578d38ceaf9SAlex Deucher 
1579d38ceaf9SAlex Deucher 	/* Valid vblank area boundaries from gpu retrieved? */
1580d38ceaf9SAlex Deucher 	if (vbl > 0) {
1581d38ceaf9SAlex Deucher 		/* Yes: Decode. */
1582d38ceaf9SAlex Deucher 		ret |= DRM_SCANOUTPOS_ACCURATE;
1583d38ceaf9SAlex Deucher 		vbl_start = vbl & 0x1fff;
1584d38ceaf9SAlex Deucher 		vbl_end = (vbl >> 16) & 0x1fff;
1585b2edaac4SSrinivasan Shanmugam 	} else {
1586d38ceaf9SAlex Deucher 		/* No: Fake something reasonable which gives at least ok results. */
15873bb403bfSVille Syrjälä 		vbl_start = mode->crtc_vdisplay;
1588d38ceaf9SAlex Deucher 		vbl_end = 0;
1589d38ceaf9SAlex Deucher 	}
1590d38ceaf9SAlex Deucher 
15918e36f9d3SAlex Deucher 	/* Called from driver internal vblank counter query code? */
15928e36f9d3SAlex Deucher 	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
15938e36f9d3SAlex Deucher 		/* Caller wants distance from real vbl_start in *hpos */
15948e36f9d3SAlex Deucher 		*hpos = *vpos - vbl_start;
15958e36f9d3SAlex Deucher 	}
15968e36f9d3SAlex Deucher 
15978e36f9d3SAlex Deucher 	/* Fudge vblank to start a few scanlines earlier to handle the
15988e36f9d3SAlex Deucher 	 * problem that vblank irqs fire a few scanlines before start
15998e36f9d3SAlex Deucher 	 * of vblank. Some driver internal callers need the true vblank
16008e36f9d3SAlex Deucher 	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
16018e36f9d3SAlex Deucher 	 *
16028e36f9d3SAlex Deucher 	 * The cause of the "early" vblank irq is that the irq is triggered
16038e36f9d3SAlex Deucher 	 * by the line buffer logic when the line buffer read position enters
16048e36f9d3SAlex Deucher 	 * the vblank, whereas our crtc scanout position naturally lags the
16058e36f9d3SAlex Deucher 	 * line buffer read position.
16068e36f9d3SAlex Deucher 	 */
16078e36f9d3SAlex Deucher 	if (!(flags & USE_REAL_VBLANKSTART))
16088e36f9d3SAlex Deucher 		vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;
16098e36f9d3SAlex Deucher 
1610d38ceaf9SAlex Deucher 	/* Test scanout position against vblank region. */
1611d38ceaf9SAlex Deucher 	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
1612d38ceaf9SAlex Deucher 		in_vbl = false;
1613d38ceaf9SAlex Deucher 
16148e36f9d3SAlex Deucher 	/* In vblank? */
16158e36f9d3SAlex Deucher 	if (in_vbl)
16168e36f9d3SAlex Deucher 		ret |= DRM_SCANOUTPOS_IN_VBLANK;
16178e36f9d3SAlex Deucher 
16188e36f9d3SAlex Deucher 	/* Called from driver internal vblank counter query code? */
16198e36f9d3SAlex Deucher 	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
16208e36f9d3SAlex Deucher 		/* Caller wants distance from fudged earlier vbl_start */
16218e36f9d3SAlex Deucher 		*vpos -= vbl_start;
16228e36f9d3SAlex Deucher 		return ret;
16238e36f9d3SAlex Deucher 	}
16248e36f9d3SAlex Deucher 
1625d38ceaf9SAlex Deucher 	/* Check if inside vblank area and apply corrective offsets:
1626d38ceaf9SAlex Deucher 	 * vpos will then be >=0 in video scanout area, but negative
1627d38ceaf9SAlex Deucher 	 * within vblank area, counting down the number of lines until
1628d38ceaf9SAlex Deucher 	 * start of scanout.
1629d38ceaf9SAlex Deucher 	 */
1630d38ceaf9SAlex Deucher 
1631d38ceaf9SAlex Deucher 	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
1632d38ceaf9SAlex Deucher 	if (in_vbl && (*vpos >= vbl_start)) {
16333bb403bfSVille Syrjälä 		vtotal = mode->crtc_vtotal;
1634520f08dfSNicholas Kazlauskas 
1635520f08dfSNicholas Kazlauskas 		/* With variable refresh rate displays the vpos can exceed
1636520f08dfSNicholas Kazlauskas 		 * the vtotal value. Clamp to 0 to return -vbl_end instead
1637520f08dfSNicholas Kazlauskas 		 * of guessing the remaining number of lines until scanout.
1638520f08dfSNicholas Kazlauskas 		 */
1639520f08dfSNicholas Kazlauskas 		*vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0;
1640d38ceaf9SAlex Deucher 	}
1641d38ceaf9SAlex Deucher 
1642d38ceaf9SAlex Deucher 	/* Correct for shifted end of vbl at vbl_end. */
1643d38ceaf9SAlex Deucher 	*vpos = *vpos - vbl_end;
1644d38ceaf9SAlex Deucher 
1645d38ceaf9SAlex Deucher 	return ret;
1646d38ceaf9SAlex Deucher }
1647d38ceaf9SAlex Deucher 
amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device * adev,int crtc)1648734dd01dSSamuel Li int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
1649d38ceaf9SAlex Deucher {
1650d38ceaf9SAlex Deucher 	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
1651d38ceaf9SAlex Deucher 		return AMDGPU_CRTC_IRQ_NONE;
1652d38ceaf9SAlex Deucher 
1653d38ceaf9SAlex Deucher 	switch (crtc) {
1654d38ceaf9SAlex Deucher 	case 0:
1655d38ceaf9SAlex Deucher 		return AMDGPU_CRTC_IRQ_VBLANK1;
1656d38ceaf9SAlex Deucher 	case 1:
1657d38ceaf9SAlex Deucher 		return AMDGPU_CRTC_IRQ_VBLANK2;
1658d38ceaf9SAlex Deucher 	case 2:
1659d38ceaf9SAlex Deucher 		return AMDGPU_CRTC_IRQ_VBLANK3;
1660d38ceaf9SAlex Deucher 	case 3:
1661d38ceaf9SAlex Deucher 		return AMDGPU_CRTC_IRQ_VBLANK4;
1662d38ceaf9SAlex Deucher 	case 4:
1663d38ceaf9SAlex Deucher 		return AMDGPU_CRTC_IRQ_VBLANK5;
1664d38ceaf9SAlex Deucher 	case 5:
1665d38ceaf9SAlex Deucher 		return AMDGPU_CRTC_IRQ_VBLANK6;
1666d38ceaf9SAlex Deucher 	default:
1667d38ceaf9SAlex Deucher 		return AMDGPU_CRTC_IRQ_NONE;
1668d38ceaf9SAlex Deucher 	}
1669d38ceaf9SAlex Deucher }
1670ea702333SThomas Zimmermann 
/* drm_crtc_helper_funcs.get_scanout_position hook: forward to the amdgpu
 * scanout position query with no special flags. @in_vblank_irq is unused
 * here; it is part of the helper callback signature.
 */
bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
			bool in_vblank_irq, int *vpos,
			int *hpos, ktime_t *stime, ktime_t *etime,
			const struct drm_display_mode *mode)
{
	return amdgpu_display_get_crtc_scanoutpos(crtc->dev, crtc->index, 0,
						  vpos, hpos, stime, etime,
						  mode);
}
1682a2e15b0eSAlex Deucher 
168389e2b437SAlex Deucher static bool
amdgpu_display_robj_is_fb(struct amdgpu_device * adev,struct amdgpu_bo * robj)168489e2b437SAlex Deucher amdgpu_display_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
168589e2b437SAlex Deucher {
168689e2b437SAlex Deucher 	struct drm_device *dev = adev_to_drm(adev);
168789e2b437SAlex Deucher 	struct drm_fb_helper *fb_helper = dev->fb_helper;
168889e2b437SAlex Deucher 
168989e2b437SAlex Deucher 	if (!fb_helper || !fb_helper->buffer)
169089e2b437SAlex Deucher 		return false;
169189e2b437SAlex Deucher 
169289e2b437SAlex Deucher 	if (gem_to_amdgpu_bo(fb_helper->buffer->gem) != robj)
169389e2b437SAlex Deucher 		return false;
169489e2b437SAlex Deucher 
169589e2b437SAlex Deucher 	return true;
169689e2b437SAlex Deucher }
169789e2b437SAlex Deucher 
amdgpu_display_suspend_helper(struct amdgpu_device * adev)1698a2e15b0eSAlex Deucher int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
1699a2e15b0eSAlex Deucher {
1700a2e15b0eSAlex Deucher 	struct drm_device *dev = adev_to_drm(adev);
1701a2e15b0eSAlex Deucher 	struct drm_crtc *crtc;
1702a2e15b0eSAlex Deucher 	struct drm_connector *connector;
1703a2e15b0eSAlex Deucher 	struct drm_connector_list_iter iter;
1704a2e15b0eSAlex Deucher 	int r;
1705a2e15b0eSAlex Deucher 
1706c69d5139SGuchun Chen 	drm_kms_helper_poll_disable(dev);
1707c69d5139SGuchun Chen 
1708a2e15b0eSAlex Deucher 	/* turn off display hw */
1709a2e15b0eSAlex Deucher 	drm_modeset_lock_all(dev);
1710a2e15b0eSAlex Deucher 	drm_connector_list_iter_begin(dev, &iter);
1711a2e15b0eSAlex Deucher 	drm_for_each_connector_iter(connector, &iter)
1712a2e15b0eSAlex Deucher 		drm_helper_connector_dpms(connector,
1713a2e15b0eSAlex Deucher 					  DRM_MODE_DPMS_OFF);
1714a2e15b0eSAlex Deucher 	drm_connector_list_iter_end(&iter);
1715a2e15b0eSAlex Deucher 	drm_modeset_unlock_all(dev);
1716a2e15b0eSAlex Deucher 	/* unpin the front buffers and cursors */
1717a2e15b0eSAlex Deucher 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1718a2e15b0eSAlex Deucher 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1719a2e15b0eSAlex Deucher 		struct drm_framebuffer *fb = crtc->primary->fb;
1720a2e15b0eSAlex Deucher 		struct amdgpu_bo *robj;
1721a2e15b0eSAlex Deucher 
1722a2e15b0eSAlex Deucher 		if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
1723a2e15b0eSAlex Deucher 			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
172493125cb7SSrinivasan Shanmugam 
1725a2e15b0eSAlex Deucher 			r = amdgpu_bo_reserve(aobj, true);
1726a2e15b0eSAlex Deucher 			if (r == 0) {
1727a2e15b0eSAlex Deucher 				amdgpu_bo_unpin(aobj);
1728a2e15b0eSAlex Deucher 				amdgpu_bo_unreserve(aobj);
1729a2e15b0eSAlex Deucher 			}
1730a2e15b0eSAlex Deucher 		}
1731a2e15b0eSAlex Deucher 
173293125cb7SSrinivasan Shanmugam 		if (!fb || !fb->obj[0])
1733a2e15b0eSAlex Deucher 			continue;
173493125cb7SSrinivasan Shanmugam 
1735a2e15b0eSAlex Deucher 		robj = gem_to_amdgpu_bo(fb->obj[0]);
173689e2b437SAlex Deucher 		if (!amdgpu_display_robj_is_fb(adev, robj)) {
1737a2e15b0eSAlex Deucher 			r = amdgpu_bo_reserve(robj, true);
1738a2e15b0eSAlex Deucher 			if (r == 0) {
1739a2e15b0eSAlex Deucher 				amdgpu_bo_unpin(robj);
1740a2e15b0eSAlex Deucher 				amdgpu_bo_unreserve(robj);
1741a2e15b0eSAlex Deucher 			}
1742a2e15b0eSAlex Deucher 		}
174389e2b437SAlex Deucher 	}
17444b12ee6fSVictor Zhao 	return 0;
1745a2e15b0eSAlex Deucher }
1746a2e15b0eSAlex Deucher 
amdgpu_display_resume_helper(struct amdgpu_device * adev)1747a2e15b0eSAlex Deucher int amdgpu_display_resume_helper(struct amdgpu_device *adev)
1748a2e15b0eSAlex Deucher {
1749a2e15b0eSAlex Deucher 	struct drm_device *dev = adev_to_drm(adev);
1750a2e15b0eSAlex Deucher 	struct drm_connector *connector;
1751a2e15b0eSAlex Deucher 	struct drm_connector_list_iter iter;
1752a2e15b0eSAlex Deucher 	struct drm_crtc *crtc;
1753a2e15b0eSAlex Deucher 	int r;
1754a2e15b0eSAlex Deucher 
1755a2e15b0eSAlex Deucher 	/* pin cursors */
1756a2e15b0eSAlex Deucher 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1757a2e15b0eSAlex Deucher 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1758a2e15b0eSAlex Deucher 
1759a2e15b0eSAlex Deucher 		if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
1760a2e15b0eSAlex Deucher 			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
176193125cb7SSrinivasan Shanmugam 
1762a2e15b0eSAlex Deucher 			r = amdgpu_bo_reserve(aobj, true);
1763a2e15b0eSAlex Deucher 			if (r == 0) {
176454b86443SChristian König 				aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1765a2e15b0eSAlex Deucher 				r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
1766a2e15b0eSAlex Deucher 				if (r != 0)
1767a2e15b0eSAlex Deucher 					dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
1768a2e15b0eSAlex Deucher 				amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
1769a2e15b0eSAlex Deucher 				amdgpu_bo_unreserve(aobj);
1770a2e15b0eSAlex Deucher 			}
1771a2e15b0eSAlex Deucher 		}
1772a2e15b0eSAlex Deucher 	}
1773a2e15b0eSAlex Deucher 
1774a2e15b0eSAlex Deucher 	drm_helper_resume_force_mode(dev);
1775a2e15b0eSAlex Deucher 
1776a2e15b0eSAlex Deucher 	/* turn on display hw */
1777a2e15b0eSAlex Deucher 	drm_modeset_lock_all(dev);
1778a2e15b0eSAlex Deucher 
1779a2e15b0eSAlex Deucher 	drm_connector_list_iter_begin(dev, &iter);
1780a2e15b0eSAlex Deucher 	drm_for_each_connector_iter(connector, &iter)
1781a2e15b0eSAlex Deucher 		drm_helper_connector_dpms(connector,
1782a2e15b0eSAlex Deucher 					  DRM_MODE_DPMS_ON);
1783a2e15b0eSAlex Deucher 	drm_connector_list_iter_end(&iter);
1784a2e15b0eSAlex Deucher 
1785a2e15b0eSAlex Deucher 	drm_modeset_unlock_all(dev);
1786a2e15b0eSAlex Deucher 
1787c69d5139SGuchun Chen 	drm_kms_helper_poll_enable(dev);
1788c69d5139SGuchun Chen 
1789a2e15b0eSAlex Deucher 	return 0;
1790a2e15b0eSAlex Deucher }
1791a2e15b0eSAlex Deucher 
1792*fe151ed7SAlex Deucher /* panic_bo is set in amdgpu_dm_plane_get_scanout_buffer() and only used in amdgpu_dm_set_pixel()
1793*fe151ed7SAlex Deucher  * they are called from the panic handler, and protected by the drm_panic spinlock.
1794*fe151ed7SAlex Deucher  */
1795*fe151ed7SAlex Deucher static struct amdgpu_bo *panic_abo;
1796*fe151ed7SAlex Deucher 
1797*fe151ed7SAlex Deucher /* Use the indirect MMIO to write each pixel to the GPU VRAM,
1798*fe151ed7SAlex Deucher  * This is a simplified version of amdgpu_device_mm_access()
1799*fe151ed7SAlex Deucher  */
amdgpu_display_set_pixel(struct drm_scanout_buffer * sb,unsigned int x,unsigned int y,u32 color)1800*fe151ed7SAlex Deucher static void amdgpu_display_set_pixel(struct drm_scanout_buffer *sb,
1801*fe151ed7SAlex Deucher 				     unsigned int x,
1802*fe151ed7SAlex Deucher 				     unsigned int y,
1803*fe151ed7SAlex Deucher 				     u32 color)
1804*fe151ed7SAlex Deucher {
1805*fe151ed7SAlex Deucher 	struct amdgpu_res_cursor cursor;
1806*fe151ed7SAlex Deucher 	unsigned long offset;
1807*fe151ed7SAlex Deucher 	struct amdgpu_bo *abo = panic_abo;
1808*fe151ed7SAlex Deucher 	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1809*fe151ed7SAlex Deucher 	uint32_t tmp;
1810*fe151ed7SAlex Deucher 
1811*fe151ed7SAlex Deucher 	offset = x * 4 + y * sb->pitch[0];
1812*fe151ed7SAlex Deucher 	amdgpu_res_first(abo->tbo.resource, offset, 4, &cursor);
1813*fe151ed7SAlex Deucher 
1814*fe151ed7SAlex Deucher 	tmp = cursor.start >> 31;
1815*fe151ed7SAlex Deucher 	WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t) cursor.start) | 0x80000000);
1816*fe151ed7SAlex Deucher 	if (tmp != 0xffffffff)
1817*fe151ed7SAlex Deucher 		WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
1818*fe151ed7SAlex Deucher 	WREG32_NO_KIQ(mmMM_DATA, color);
1819*fe151ed7SAlex Deucher }
1820*fe151ed7SAlex Deucher 
amdgpu_display_get_scanout_buffer(struct drm_plane * plane,struct drm_scanout_buffer * sb)1821*fe151ed7SAlex Deucher int amdgpu_display_get_scanout_buffer(struct drm_plane *plane,
1822*fe151ed7SAlex Deucher 				      struct drm_scanout_buffer *sb)
1823*fe151ed7SAlex Deucher {
1824*fe151ed7SAlex Deucher 	struct amdgpu_bo *abo;
1825*fe151ed7SAlex Deucher 	struct drm_framebuffer *fb = plane->state->fb;
1826*fe151ed7SAlex Deucher 
1827*fe151ed7SAlex Deucher 	if (!fb)
1828*fe151ed7SAlex Deucher 		return -EINVAL;
1829*fe151ed7SAlex Deucher 
1830*fe151ed7SAlex Deucher 	DRM_DEBUG_KMS("Framebuffer %dx%d %p4cc\n", fb->width, fb->height, &fb->format->format);
1831*fe151ed7SAlex Deucher 
1832*fe151ed7SAlex Deucher 	abo = gem_to_amdgpu_bo(fb->obj[0]);
1833*fe151ed7SAlex Deucher 	if (!abo)
1834*fe151ed7SAlex Deucher 		return -EINVAL;
1835*fe151ed7SAlex Deucher 
1836*fe151ed7SAlex Deucher 	sb->width = fb->width;
1837*fe151ed7SAlex Deucher 	sb->height = fb->height;
1838*fe151ed7SAlex Deucher 	/* Use the generic linear format, because tiling will be disabled in panic_flush() */
1839*fe151ed7SAlex Deucher 	sb->format = drm_format_info(fb->format->format);
1840*fe151ed7SAlex Deucher 	if (!sb->format)
1841*fe151ed7SAlex Deucher 		return -EINVAL;
1842*fe151ed7SAlex Deucher 
1843*fe151ed7SAlex Deucher 	sb->pitch[0] = fb->pitches[0];
1844*fe151ed7SAlex Deucher 
1845*fe151ed7SAlex Deucher 	if (abo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) {
1846*fe151ed7SAlex Deucher 		if (abo->tbo.resource->mem_type != TTM_PL_VRAM) {
1847*fe151ed7SAlex Deucher 			drm_warn(plane->dev, "amdgpu panic, framebuffer not in VRAM\n");
1848*fe151ed7SAlex Deucher 			return -EINVAL;
1849*fe151ed7SAlex Deucher 		}
1850*fe151ed7SAlex Deucher 		/* Only handle 32bits format, to simplify mmio access */
1851*fe151ed7SAlex Deucher 		if (fb->format->cpp[0] != 4) {
1852*fe151ed7SAlex Deucher 			drm_warn(plane->dev, "amdgpu panic, pixel format is not 32bits\n");
1853*fe151ed7SAlex Deucher 			return -EINVAL;
1854*fe151ed7SAlex Deucher 		}
1855*fe151ed7SAlex Deucher 		sb->set_pixel = amdgpu_display_set_pixel;
1856*fe151ed7SAlex Deucher 		panic_abo = abo;
1857*fe151ed7SAlex Deucher 		return 0;
1858*fe151ed7SAlex Deucher 	}
1859*fe151ed7SAlex Deucher 	if (!abo->kmap.virtual &&
1860*fe151ed7SAlex Deucher 	    ttm_bo_kmap(&abo->tbo, 0, PFN_UP(abo->tbo.base.size), &abo->kmap)) {
1861*fe151ed7SAlex Deucher 		drm_warn(plane->dev, "amdgpu bo map failed, panic won't be displayed\n");
1862*fe151ed7SAlex Deucher 		return -ENOMEM;
1863*fe151ed7SAlex Deucher 	}
1864*fe151ed7SAlex Deucher 	if (abo->kmap.bo_kmap_type & TTM_BO_MAP_IOMEM_MASK)
1865*fe151ed7SAlex Deucher 		iosys_map_set_vaddr_iomem(&sb->map[0], abo->kmap.virtual);
1866*fe151ed7SAlex Deucher 	else
1867*fe151ed7SAlex Deucher 		iosys_map_set_vaddr(&sb->map[0], abo->kmap.virtual);
1868*fe151ed7SAlex Deucher 
1869*fe151ed7SAlex Deucher 	return 0;
1870*fe151ed7SAlex Deucher }
1871