// SPDX-License-Identifier: GPL-2.0+

#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "dce_v6_0.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "dce_v8_0.h"
#endif
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "ivsrcid/ivsrcid_vislands30.h"
#include "amdgpu_vkms.h"
#include "amdgpu_display.h"
#include "atom.h"
#include "amdgpu_irq.h"

/**
 * DOC: amdgpu_vkms
 *
 * The amdgpu vkms interface provides a virtual KMS interface for several use
 * cases: devices without display hardware, platforms where the actual display
 * hardware is not useful (e.g., servers), SR-IOV virtual functions, device
 * emulation/simulation, and device bring-up prior to display hardware being
 * usable. We previously emulated a legacy KMS interface, but there was a desire
 * to move to the atomic KMS interface. The vkms driver did everything we
 * needed, but we wanted KMS support natively in the driver without buffer
 * sharing and the ability to support an instance of VKMS per device. We first
 * looked at splitting vkms into a stub driver and a helper module that other
 * drivers could use to implement a virtual display, but this strategy ended up
 * being messy due to driver-specific callbacks needed for buffer management.
 * Ultimately, it proved easier to import the vkms code as it mostly used core
 * drm helpers anyway.
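 *
 * Virtual display is normally enabled with the amdgpu.virtual_display module
 * parameter, which takes a PCI bus id plus a virtual CRTC count (the bus id
 * below is only an illustration):
 *
 *   modprobe amdgpu virtual_display=0000:03:00.0,1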
 */

static const u32 amdgpu_vkms_formats[] = {
	DRM_FORMAT_XRGB8888,
};

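/*
 * hrtimer callback that stands in for the hardware vblank interrupt: it
 * re-arms itself one frame period in the future and feeds the DRM vblank
 * machinery via drm_crtc_handle_vblank().
 */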
static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer)
{
	struct amdgpu_crtc *amdgpu_crtc = container_of(timer, struct amdgpu_crtc, vblank_timer);
	struct drm_crtc *crtc = &amdgpu_crtc->base;
	struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc);
	u64 ret_overrun;
	bool ret;

	ret_overrun = hrtimer_forward_now(&amdgpu_crtc->vblank_timer,
					  output->period_ns);
	if (ret_overrun != 1)
		DRM_WARN("%s: vblank timer overrun\n", __func__);

	ret = drm_crtc_handle_vblank(crtc);
	/* Don't queue timer again when vblank is disabled. */
	if (!ret)
		return HRTIMER_NORESTART;

	return HRTIMER_RESTART;
}

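/*
 * Recompute the timestamping constants for the current mode and kick off
 * the simulated-vblank hrtimer at that frame period.
 */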
static int amdgpu_vkms_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
	struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	drm_calc_timestamping_constants(crtc, &crtc->mode);

	out->period_ns = ktime_set(0, vblank->framedur_ns);
	hrtimer_start(&amdgpu_crtc->vblank_timer, out->period_ns, HRTIMER_MODE_REL);

	return 0;
}

static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	hrtimer_try_to_cancel(&amdgpu_crtc->vblank_timer);
}

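/*
 * Derive the vblank timestamp from the hrtimer expiry rather than from
 * hardware registers; see the comment in the body for why the value is
 * shifted back by one frame.
 */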
static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
					     int *max_error,
					     ktime_t *vblank_time,
					     bool in_vblank_irq)
{
	struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc);
	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (!READ_ONCE(vblank->enabled)) {
		*vblank_time = ktime_get();
		return true;
	}

	*vblank_time = READ_ONCE(amdgpu_crtc->vblank_timer.node.expires);

	if (WARN_ON(*vblank_time == vblank->time))
		return true;

	/*
	 * To prevent races we roll the hrtimer forward before we do any
	 * interrupt processing - this is how real hw works (the interrupt is
	 * only generated after all the vblank registers are updated) and what
	 * the vblank core expects. Therefore we need to always correct the
	 * timestamp by one frame.
	 */
	*vblank_time -= output->period_ns;

	return true;
}

static const struct drm_crtc_funcs amdgpu_vkms_crtc_funcs = {
	.set_config             = drm_atomic_helper_set_config,
	.destroy                = drm_crtc_cleanup,
	.page_flip              = drm_atomic_helper_page_flip,
	.reset                  = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state   = drm_atomic_helper_crtc_destroy_state,
	.enable_vblank		= amdgpu_vkms_enable_vblank,
	.disable_vblank		= amdgpu_vkms_disable_vblank,
	.get_vblank_timestamp	= amdgpu_vkms_get_vblank_timestamp,
};

static void amdgpu_vkms_crtc_atomic_enable(struct drm_crtc *crtc,
					   struct drm_atomic_state *state)
{
	drm_crtc_vblank_on(crtc);
}

static void amdgpu_vkms_crtc_atomic_disable(struct drm_crtc *crtc,
					    struct drm_atomic_state *state)
{
	drm_crtc_vblank_off(crtc);
}

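/*
 * Hand any pending flip/modeset completion event to the vblank code so it
 * is sent on the next simulated vblank, or immediately if vblank is off.
 */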
static void amdgpu_vkms_crtc_atomic_flush(struct drm_crtc *crtc,
					  struct drm_atomic_state *state)
{
	unsigned long flags;

	if (crtc->state->event) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);

		if (drm_crtc_vblank_get(crtc) != 0)
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
		else
			drm_crtc_arm_vblank_event(crtc, crtc->state->event);

		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

		crtc->state->event = NULL;
	}
}

static const struct drm_crtc_helper_funcs amdgpu_vkms_crtc_helper_funcs = {
	.atomic_flush	= amdgpu_vkms_crtc_atomic_flush,
	.atomic_enable	= amdgpu_vkms_crtc_atomic_enable,
	.atomic_disable	= amdgpu_vkms_crtc_atomic_disable,
};

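/*
 * Initialize the CRTC, register it in the mode_info table and set up the
 * per-CRTC vblank hrtimer (not started here; enable_vblank arms it).
 */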
static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_plane *primary, struct drm_plane *cursor)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int ret;

	ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
					&amdgpu_vkms_crtc_funcs, NULL);
	if (ret) {
		DRM_ERROR("Failed to init CRTC\n");
		return ret;
	}

	drm_crtc_helper_add(crtc, &amdgpu_vkms_crtc_helper_funcs);

	amdgpu_crtc->crtc_id = drm_crtc_index(crtc);
	adev->mode_info.crtcs[drm_crtc_index(crtc)] = amdgpu_crtc;

	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
	amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;

	hrtimer_setup(&amdgpu_crtc->vblank_timer, &amdgpu_vkms_vblank_simulate, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);

	return ret;
}

static const struct drm_connector_funcs amdgpu_vkms_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

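/*
 * There is no sink and thus no EDID, so populate the connector with a fixed
 * list of common modes and mark XRES_DEF x YRES_DEF as preferred.
 */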
static int amdgpu_vkms_conn_get_modes(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct drm_display_mode *mode = NULL;
	unsigned int i;
	static const struct mode_size {
		int w;
		int h;
	} common_modes[] = {
		{ 640,  480},
		{ 720,  480},
		{ 800,  600},
		{ 848,  480},
		{1024,  768},
		{1152,  768},
		{1280,  720},
		{1280,  800},
		{1280,  854},
		{1280,  960},
		{1280, 1024},
		{1440,  900},
		{1400, 1050},
		{1680, 1050},
		{1600, 1200},
		{1920, 1080},
		{1920, 1200},
		{2560, 1440},
		{4096, 3112},
		{3656, 2664},
		{3840, 2160},
		{4096, 2160},
	};

	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
		if (!mode)
			continue;
		drm_mode_probed_add(connector, mode);
	}

	drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);

	return ARRAY_SIZE(common_modes);
}

static const struct drm_connector_helper_funcs amdgpu_vkms_conn_helper_funcs = {
	.get_modes    = amdgpu_vkms_conn_get_modes,
};

static const struct drm_plane_funcs amdgpu_vkms_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= drm_plane_cleanup,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};

static void amdgpu_vkms_plane_atomic_update(struct drm_plane *plane,
					    struct drm_atomic_state *old_state)
{
	/* Intentionally empty: no hardware scanout is programmed here. */
}

static int amdgpu_vkms_plane_atomic_check(struct drm_plane *plane,
					  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct drm_crtc_state *crtc_state;
	int ret;

	if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
		return 0;

	crtc_state = drm_atomic_get_crtc_state(state,
					       new_plane_state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  false, true);
	if (ret != 0)
		return ret;

	/* for now primary plane must be visible and full screen */
	if (!new_plane_state->visible)
		return -EINVAL;

	return 0;
}

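/*
 * Pin the framebuffer BO into a displayable domain and map it into GART so
 * that amdgpu_bo_gpu_offset() below yields a valid scanout address.
 */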
static int amdgpu_vkms_prepare_fb(struct drm_plane *plane,
				  struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	uint32_t domain;
	int r;

	if (!new_state->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}
	afb = to_amdgpu_framebuffer(new_state->fb);

	obj = drm_gem_fb_get_obj(new_state->fb, 0);
	if (!obj) {
		DRM_ERROR("Failed to get obj from framebuffer\n");
		return -EINVAL;
	}

	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);

	r = amdgpu_bo_reserve(rbo, true);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
	if (r) {
		dev_err(adev->dev, "allocating fence slot failed (%d)\n", r);
		goto error_unlock;
	}

	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	rbo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		goto error_unlock;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		DRM_ERROR("%p bind failed\n", rbo);
		goto error_unpin;
	}

	amdgpu_bo_unreserve(rbo);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	amdgpu_bo_ref(rbo);

	return 0;

error_unpin:
	amdgpu_bo_unpin(rbo);

error_unlock:
	amdgpu_bo_unreserve(rbo);
	return r;
}

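/* Undo prepare_fb: unpin the BO and drop the reference taken there. */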
static void amdgpu_vkms_cleanup_fb(struct drm_plane *plane,
				   struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	struct drm_gem_object *obj;
	int r;

	if (!old_state->fb)
		return;

	obj = drm_gem_fb_get_obj(old_state->fb, 0);
	if (!obj) {
		DRM_ERROR("Failed to get obj from framebuffer\n");
		return;
	}

	rbo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}

static const struct drm_plane_helper_funcs amdgpu_vkms_primary_helper_funcs = {
	.atomic_update		= amdgpu_vkms_plane_atomic_update,
	.atomic_check		= amdgpu_vkms_plane_atomic_check,
	.prepare_fb		= amdgpu_vkms_prepare_fb,
	.cleanup_fb		= amdgpu_vkms_cleanup_fb,
};

static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
						enum drm_plane_type type,
						int index)
{
	struct drm_plane *plane;
	int ret;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	ret = drm_universal_plane_init(dev, plane, 1 << index,
				       &amdgpu_vkms_plane_funcs,
				       amdgpu_vkms_formats,
				       ARRAY_SIZE(amdgpu_vkms_formats),
				       NULL, type, NULL);
	if (ret) {
		kfree(plane);
		return ERR_PTR(ret);
	}

	drm_plane_helper_add(plane, &amdgpu_vkms_primary_helper_funcs);

	return plane;
}

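/*
 * Wire up one full virtual pipeline: primary plane -> CRTC -> encoder ->
 * virtual connector, with each output restricted to its own CRTC index.
 */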
static int amdgpu_vkms_output_init(struct drm_device *dev,
				   struct amdgpu_vkms_output *output, int index)
{
	struct drm_connector *connector = &output->connector;
	struct drm_encoder *encoder = &output->encoder;
	struct drm_crtc *crtc = &output->crtc.base;
	struct drm_plane *primary, *cursor = NULL;
	int ret;

	primary = amdgpu_vkms_plane_init(dev, DRM_PLANE_TYPE_PRIMARY, index);
	if (IS_ERR(primary))
		return PTR_ERR(primary);

	ret = amdgpu_vkms_crtc_init(dev, crtc, primary, cursor);
	if (ret)
		goto err_crtc;

	ret = drm_connector_init(dev, connector, &amdgpu_vkms_connector_funcs,
				 DRM_MODE_CONNECTOR_VIRTUAL);
	if (ret) {
		DRM_ERROR("Failed to init connector\n");
		goto err_connector;
	}

	drm_connector_helper_add(connector, &amdgpu_vkms_conn_helper_funcs);

	ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL);
	if (ret) {
		DRM_ERROR("Failed to init encoder\n");
		goto err_encoder;
	}
	encoder->possible_crtcs = 1 << index;

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret) {
		DRM_ERROR("Failed to attach connector to encoder\n");
		goto err_attach;
	}

	drm_mode_config_reset(dev);

	return 0;

err_attach:
	drm_encoder_cleanup(encoder);

err_encoder:
	drm_connector_cleanup(connector);

err_connector:
	drm_crtc_cleanup(crtc);

err_crtc:
	drm_plane_cleanup(primary);

	return ret;
}

const struct drm_mode_config_funcs amdgpu_vkms_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

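/*
 * IP-block sw_init: allocate one amdgpu_vkms_output per CRTC, configure the
 * mode_config limits and create the virtual outputs plus vblank support.
 */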
static int amdgpu_vkms_sw_init(struct amdgpu_ip_block *ip_block)
{
	int r, i;
	struct amdgpu_device *adev = ip_block->adev;

	adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc,
		sizeof(struct amdgpu_vkms_output), GFP_KERNEL);
	if (!adev->amdgpu_vkms_output)
		return -ENOMEM;

	adev_to_drm(adev)->max_vblank_count = 0;

	adev_to_drm(adev)->mode_config.funcs = &amdgpu_vkms_mode_funcs;

	adev_to_drm(adev)->mode_config.max_width = XRES_MAX;
	adev_to_drm(adev)->mode_config.max_height = YRES_MAX;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	adev_to_drm(adev)->mode_config.prefer_shadow = 1;

	adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;

	r = amdgpu_display_modeset_create_props(adev);
	if (r)
		return r;

	/* allocate crtcs, encoders, connectors */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_vkms_output_init(adev_to_drm(adev), &adev->amdgpu_vkms_output[i], i);
		if (r)
			return r;
	}

	r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
	if (r)
		return r;

	drm_kms_helper_poll_init(adev_to_drm(adev));

	adev->mode_info.mode_config_initialized = true;
	return 0;
}

static int amdgpu_vkms_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i = 0;

	for (i = 0; i < adev->mode_info.num_crtc; i++)
		if (adev->mode_info.crtcs[i])
			hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);

	drm_kms_helper_poll_fini(adev_to_drm(adev));
	drm_mode_config_cleanup(adev_to_drm(adev));

	adev->mode_info.mode_config_initialized = false;

	drm_edid_free(adev->mode_info.bios_hardcoded_edid);
	kfree(adev->amdgpu_vkms_output);
	return 0;
}

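/*
 * hw_init only has to make sure any real DCE block on the ASIC is shut off
 * so it cannot interfere with the virtual display path.
 */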
static int amdgpu_vkms_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		dce_v6_0_disable_dce(adev);
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		dce_v8_0_disable_dce(adev);
		break;
#endif
	case CHIP_FIJI:
	case CHIP_TONGA:
		dce_v10_0_disable_dce(adev);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_VEGAM:
		dce_v11_0_disable_dce(adev);
		break;
	case CHIP_TOPAZ:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_HAINAN:
#endif
		/* no DCE */
		break;
	default:
		break;
	}
	return 0;
}

static int amdgpu_vkms_hw_fini(struct amdgpu_ip_block *ip_block)
{
	return 0;
}

static int amdgpu_vkms_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = drm_mode_config_helper_suspend(adev_to_drm(adev));
	if (r)
		return r;

	return 0;
}

static int amdgpu_vkms_resume(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = amdgpu_vkms_hw_init(ip_block);
	if (r)
		return r;
	return drm_mode_config_helper_resume(adev_to_drm(ip_block->adev));
}

static bool amdgpu_vkms_is_idle(struct amdgpu_ip_block *ip_block)
{
	return true;
}

static int amdgpu_vkms_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int amdgpu_vkms_set_powergating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = {
	.name = "amdgpu_vkms",
	.sw_init = amdgpu_vkms_sw_init,
	.sw_fini = amdgpu_vkms_sw_fini,
	.hw_init = amdgpu_vkms_hw_init,
	.hw_fini = amdgpu_vkms_hw_fini,
	.suspend = amdgpu_vkms_suspend,
	.resume = amdgpu_vkms_resume,
	.is_idle = amdgpu_vkms_is_idle,
	.set_clockgating_state = amdgpu_vkms_set_clockgating_state,
	.set_powergating_state = amdgpu_vkms_set_powergating_state,
};

const struct amdgpu_ip_block_version amdgpu_vkms_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_vkms_ip_funcs,
};