/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "dc.h"
#include "amdgpu_securedisplay.h"

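/*
 * CRC source names exposed to userspace through the DRM debugfs CRC
 * interface (crtc-N/crc/control). "crtc" taps the CRC at the pipe output
 * and "dprx" at the DP receiver. The plain sources force dithering off
 * (truncation to 8 bpc) for deterministic CRCs; the "dither" variants
 * keep the default dither settings.
 */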
static const char *const pipe_crc_sources[] = {
	"none",
	"crtc",
	"crtc dither",
	"dprx",
	"dprx dither",
	"auto",
};

static enum amdgpu_dm_pipe_crc_source dm_parse_crc_source(const char *source)
{
	if (!source || !strcmp(source, "none"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_NONE;
	if (!strcmp(source, "auto") || !strcmp(source, "crtc"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC;
	if (!strcmp(source, "dprx"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX;
	if (!strcmp(source, "crtc dither"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER;
	if (!strcmp(source, "dprx dither"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER;

	return AMDGPU_DM_PIPE_CRC_SOURCE_INVALID;
}

static bool dm_is_crc_source_crtc(enum amdgpu_dm_pipe_crc_source src)
{
	return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC) ||
	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER);
}

static bool dm_is_crc_source_dprx(enum amdgpu_dm_pipe_crc_source src)
{
	return (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX) ||
	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER);
}

static bool dm_need_crc_dither(enum amdgpu_dm_pipe_crc_source src)
{
	return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER) ||
	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER) ||
	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_NONE);
}

const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
						  size_t *count)
{
	*count = ARRAY_SIZE(pipe_crc_sources);
	return pipe_crc_sources;
}

#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
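/*
 * Build a stable connector -> PHY ID mapping for the secure display TA.
 * Connected connectors are sorted by link encoder HW instance first and,
 * for MST, by their position in the MST topology (tree depth, then RAD),
 * so the same physical display always resolves to the same PHY ID.
 */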
static void update_phy_id_mapping(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *sort_connector[AMDGPU_DM_MAX_CRTC] = {NULL};
	struct drm_connector_list_iter iter;
	uint8_t idx = 0, idx_2 = 0, connector_cnt = 0;

	dm->secure_display_ctx.phy_mapping_updated = false;

	mutex_lock(&ddev->mode_config.mutex);
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {

		if (connector->status != connector_status_connected)
			continue;

		if (idx >= AMDGPU_DM_MAX_CRTC) {
			DRM_WARN("%s connected connectors exceed max crtc\n", __func__);
			drm_connector_list_iter_end(&iter);
			mutex_unlock(&ddev->mode_config.mutex);
			return;
		}

		aconnector = to_amdgpu_dm_connector(connector);

		sort_connector[idx] = aconnector;
		idx++;
		connector_cnt++;
	}
	drm_connector_list_iter_end(&iter);

	/* Sort connectors by link_enc_hw_inst first */
	for (idx = connector_cnt; idx > 1; idx--) {
		for (idx_2 = 0; idx_2 < (idx - 1); idx_2++) {
			if (sort_connector[idx_2]->dc_link->link_enc_hw_inst >
			    sort_connector[idx_2 + 1]->dc_link->link_enc_hw_inst)
				swap(sort_connector[idx_2], sort_connector[idx_2 + 1]);
		}
	}

	/*
	 * Sort MST connectors by RAD. MST connectors with the same
	 * link_enc_hw_inst are already grouped together by the sort above.
	 */
	for (idx = 0; idx < connector_cnt; /* do nothing */) {
		if (sort_connector[idx]->mst_root) {
			uint8_t i, j, k;
			uint8_t mst_con_cnt = 1;

			for (idx_2 = (idx + 1); idx_2 < connector_cnt; idx_2++) {
				if (sort_connector[idx_2]->mst_root == sort_connector[idx]->mst_root)
					mst_con_cnt++;
				else
					break;
			}

			for (i = mst_con_cnt; i > 1; i--) {
				for (j = idx; j < (idx + i - 1); j++) {
					int mstb_lct = sort_connector[j]->mst_output_port->parent->lct;
					int next_mstb_lct = sort_connector[j + 1]->mst_output_port->parent->lct;
					u8 *rad;
					u8 *next_rad;
					bool need_swap = false;

					/*
					 * Sort by MST tree depth first, then compare the
					 * RAD if the depth is the same.
					 */
					if (mstb_lct > next_mstb_lct) {
						need_swap = true;
					} else if (mstb_lct == next_mstb_lct) {
						if (mstb_lct == 1) {
							if (sort_connector[j]->mst_output_port->port_num > sort_connector[j + 1]->mst_output_port->port_num)
								need_swap = true;
						} else if (mstb_lct > 1) {
							rad = sort_connector[j]->mst_output_port->parent->rad;
							next_rad = sort_connector[j + 1]->mst_output_port->parent->rad;

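							/*
							 * A RAD entry stores one 4-bit port number
							 * per hop, two hops packed per byte with
							 * the earlier hop in the upper nibble.
							 */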
							for (k = 0; k < mstb_lct - 1; k++) {
								int shift = (k % 2) ? 0 : 4;
								int port_num = (rad[k / 2] >> shift) & 0xf;
								int next_port_num = (next_rad[k / 2] >> shift) & 0xf;

								if (port_num > next_port_num) {
									need_swap = true;
									break;
								}
							}
						} else {
							DRM_ERROR("MST LCT should not be < 1");
							mutex_unlock(&ddev->mode_config.mutex);
							return;
						}
					}

					if (need_swap)
						swap(sort_connector[j], sort_connector[j + 1]);
				}
			}

			idx += mst_con_cnt;
		} else {
			idx++;
		}
	}

	/* Sorting complete. Assign the results to dm->secure_display_ctx.phy_id_mapping[]. */
	memset(dm->secure_display_ctx.phy_id_mapping, 0, sizeof(dm->secure_display_ctx.phy_id_mapping));
	for (idx = 0; idx < connector_cnt; idx++) {
		aconnector = sort_connector[idx];

		dm->secure_display_ctx.phy_id_mapping[idx].assigned = true;
		dm->secure_display_ctx.phy_id_mapping[idx].is_mst = false;
		dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst = aconnector->dc_link->link_enc_hw_inst;

		if (sort_connector[idx]->mst_root) {
			dm->secure_display_ctx.phy_id_mapping[idx].is_mst = true;
			dm->secure_display_ctx.phy_id_mapping[idx].lct = aconnector->mst_output_port->parent->lct;
			dm->secure_display_ctx.phy_id_mapping[idx].port_num = aconnector->mst_output_port->port_num;
			memcpy(dm->secure_display_ctx.phy_id_mapping[idx].rad,
				aconnector->mst_output_port->parent->rad, sizeof(aconnector->mst_output_port->parent->rad));
		}
	}
	mutex_unlock(&ddev->mode_config.mutex);

	dm->secure_display_ctx.phy_id_mapping_cnt = connector_cnt;
	dm->secure_display_ctx.phy_mapping_updated = true;
}

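/*
 * Look up the PHY ID previously recorded for @aconnector by
 * update_phy_id_mapping(). For MST connectors the entry must match the
 * link encoder instance as well as the MST position (LCT, port number
 * and, for deeper trees, the full RAD). Returns true and stores the
 * result in @phy_id on success.
 */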
static bool get_phy_id(struct amdgpu_display_manager *dm,
			struct amdgpu_dm_connector *aconnector, uint8_t *phy_id)
{
	int idx, idx_2;
	bool found = false;

	/*
	 * Assume secure display starts after all connectors have been probed
	 * and that the connection configuration is static from then on.
	 */
	if (!dm->secure_display_ctx.phy_mapping_updated) {
		DRM_WARN("%s The phy id mapping table should be updated before reading it", __func__);
		return false;
	}

	for (idx = 0; idx < dm->secure_display_ctx.phy_id_mapping_cnt; idx++) {
		if (!dm->secure_display_ctx.phy_id_mapping[idx].assigned) {
			DRM_ERROR("phy_id_mapping[%d] should be assigned", idx);
			return false;
		}

		if (aconnector->dc_link->link_enc_hw_inst ==
				dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst) {
			if (!dm->secure_display_ctx.phy_id_mapping[idx].is_mst) {
				found = true;
				goto out;
			} else {
				/* Could be caused by wrongly passing in the MST root connector */
				if (!aconnector->mst_output_port) {
					DRM_ERROR("%s Got an MST connector without a port assigned", __func__);
					return false;
				}

				if (aconnector->mst_root &&
					aconnector->mst_root->mst_mgr.mst_primary == NULL) {
					DRM_WARN("%s Passed in a stale MST connector", __func__);
				}

				if (aconnector->mst_output_port->parent->lct == dm->secure_display_ctx.phy_id_mapping[idx].lct &&
					aconnector->mst_output_port->port_num == dm->secure_display_ctx.phy_id_mapping[idx].port_num) {
					if (aconnector->mst_output_port->parent->lct == 1) {
						found = true;
						goto out;
					} else if (aconnector->mst_output_port->parent->lct > 1) {
						/* Check the RAD */
						for (idx_2 = 0; idx_2 < aconnector->mst_output_port->parent->lct - 1; idx_2++) {
							int shift = (idx_2 % 2) ? 0 : 4;
							int port_num = (aconnector->mst_output_port->parent->rad[idx_2 / 2] >> shift) & 0xf;
							int port_num2 = (dm->secure_display_ctx.phy_id_mapping[idx].rad[idx_2 / 2] >> shift) & 0xf;

							if (port_num != port_num2)
								break;
						}

						if (idx_2 == aconnector->mst_output_port->parent->lct - 1) {
							found = true;
							goto out;
						}
					} else {
						DRM_ERROR("LCT should be >= 1");
						return false;
					}
				}
			}
		}
	}

out:
	if (found) {
		DRM_DEBUG_DRIVER("Associated secure display PHY ID %d", idx);
		*phy_id = idx;
	} else {
		DRM_WARN("Can't find an associated phy ID");
		return false;
	}

	return true;
}

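/*
 * Reset the secure display ROI window to its defaults. If a window was
 * active, also flush the pending ROI/TA work for this CRTC and tell DC to
 * stop the CRC window on the stream.
 */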
static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc, struct dc_stream_state *stream)
{
	struct drm_device *drm_dev = crtc->dev;
	struct amdgpu_display_manager *dm = &drm_to_adev(drm_dev)->dm;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	bool was_activated;
	struct amdgpu_dm_connector *aconnector;
	uint8_t phy_id;

	spin_lock_irq(&drm_dev->event_lock);
	was_activated = acrtc->dm_irq_params.window_param.activated;
	acrtc->dm_irq_params.window_param.x_start = 0;
	acrtc->dm_irq_params.window_param.y_start = 0;
	acrtc->dm_irq_params.window_param.x_end = 0;
	acrtc->dm_irq_params.window_param.y_end = 0;
	acrtc->dm_irq_params.window_param.activated = false;
	acrtc->dm_irq_params.window_param.update_win = false;
	acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
	spin_unlock_irq(&drm_dev->event_lock);

	/* Disable secure_display if it was enabled */
	if (was_activated) {
		/* Stop ROI updates on this CRTC */
		flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].notify_ta_work);
		flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].forward_roi_work);

		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (aconnector && get_phy_id(dm, aconnector, &phy_id))
			dc_stream_forward_crc_window(stream, NULL, phy_id, true);
		else
			DRM_DEBUG_DRIVER("%s Can't find matching phy id", __func__);
	}
}

static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
{
	struct secure_display_crtc_context *crtc_ctx;
	struct psp_context *psp;
	struct ta_securedisplay_cmd *securedisplay_cmd;
	struct drm_crtc *crtc;
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector;
	uint8_t phy_inst;
	struct amdgpu_display_manager *dm;
	int ret;

	crtc_ctx = container_of(work, struct secure_display_crtc_context, notify_ta_work);
	crtc = crtc_ctx->crtc;

	if (!crtc)
		return;

	psp = &drm_to_adev(crtc->dev)->psp;

	if (!psp->securedisplay_context.context.initialized) {
		DRM_DEBUG_DRIVER("Secure display: TA context not initialized, failed to notify PSP TA\n");
		return;
	}

	dm = &drm_to_adev(crtc->dev)->dm;
	stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
	if (!aconnector)
		return;

	mutex_lock(&crtc->dev->mode_config.mutex);
	if (!get_phy_id(dm, aconnector, &phy_inst)) {
		DRM_WARN("%s Can't find mapping phy id!", __func__);
		mutex_unlock(&crtc->dev->mode_config.mutex);
		return;
	}
	mutex_unlock(&crtc->dev->mode_config.mutex);

	/* A lock is needed since multiple CRTCs share the TA command buffer */
	mutex_lock(&psp->securedisplay_context.mutex);

	psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
						TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);

	securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst;

	/*
	 * The PSP TA is expected to finish the data transmission over I2C
	 * within the current frame, even if up to 4 CRTCs request to send
	 * in this frame.
	 */
	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);

	if (!ret) {
		if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS)
			psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
	}

	mutex_unlock(&psp->securedisplay_context.mutex);
}

static void
amdgpu_dm_forward_crc_window(struct work_struct *work)
{
	struct secure_display_crtc_context *crtc_ctx;
	struct amdgpu_display_manager *dm;
	struct drm_crtc *crtc;
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector;
	uint8_t phy_id;

	crtc_ctx = container_of(work, struct secure_display_crtc_context, forward_roi_work);
	crtc = crtc_ctx->crtc;

	if (!crtc)
		return;

	dm = &drm_to_adev(crtc->dev)->dm;
	stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector)
		return;

	mutex_lock(&crtc->dev->mode_config.mutex);
	if (!get_phy_id(dm, aconnector, &phy_id)) {
		DRM_WARN("%s Can't find mapping phy id!", __func__);
		mutex_unlock(&crtc->dev->mode_config.mutex);
		return;
	}
	mutex_unlock(&crtc->dev->mode_config.mutex);

	mutex_lock(&dm->dc_lock);
	dc_stream_forward_crc_window(stream, &crtc_ctx->rect,
		phy_id, false);
	mutex_unlock(&dm->dc_lock);
}

bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc)
{
	struct drm_device *drm_dev = crtc->dev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	bool ret = false;

	spin_lock_irq(&drm_dev->event_lock);
	ret = acrtc->dm_irq_params.window_param.activated;
	spin_unlock_irq(&drm_dev->event_lock);

	return ret;
}
#endif

int
amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
				 size_t *values_cnt)
{
	enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);

	if (source < 0) {
		DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",
				 src_name, crtc->index);
		return -EINVAL;
	}

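	/* DC reports three CRC values per frame: R/Cr, G/Y and B/Cb */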
	*values_cnt = 3;
	return 0;
}

int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
					struct dm_crtc_state *dm_crtc_state,
					enum amdgpu_dm_pipe_crc_source source)
{
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dc_stream_state *stream_state = dm_crtc_state->stream;
	bool enable = amdgpu_dm_is_valid_crc_source(source);
	int ret = 0;

	/* Configuration will be deferred to stream enable. */
	if (!stream_state)
		return -EINVAL;

	mutex_lock(&adev->dm.dc_lock);

	/* Enable or disable CRTC CRC generation */
	if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
		if (!dc_stream_configure_crc(stream_state->ctx->dc,
					     stream_state, NULL, enable, enable)) {
			ret = -EINVAL;
			goto unlock;
		}
	}

	/* Configure dithering */
	if (!dm_need_crc_dither(source)) {
		dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8);
		dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
					    DYN_EXPANSION_DISABLE);
	} else {
		dc_stream_set_dither_option(stream_state,
					    DITHER_OPTION_DEFAULT);
		dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
					    DYN_EXPANSION_AUTO);
	}

unlock:
	mutex_unlock(&adev->dm.dc_lock);

	return ret;
}

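/*
 * Entry point for the DRM debugfs CRC control interface. Parses the
 * requested source, waits for any pending commit to finish, (re)configures
 * CRTC or DPRX CRC generation, and takes or drops the vblank reference
 * that keeps the CRC interrupt handler running while capture is active.
 */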
int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
	enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);
	enum amdgpu_dm_pipe_crc_source cur_crc_src;
	struct drm_crtc_commit *commit;
	struct dm_crtc_state *crtc_state;
	struct drm_device *drm_dev = crtc->dev;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	struct amdgpu_device *adev = drm_to_adev(drm_dev);
	struct amdgpu_display_manager *dm = &adev->dm;
#endif
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct drm_dp_aux *aux = NULL;
	bool enable = false;
	bool enabled = false;
	int ret = 0;

	if (source < 0) {
		DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",
				 src_name, crtc->index);
		return -EINVAL;
	}

	ret = drm_modeset_lock(&crtc->mutex, NULL);
	if (ret)
		return ret;

	spin_lock(&crtc->commit_lock);
	commit = list_first_entry_or_null(&crtc->commit_list,
					  struct drm_crtc_commit, commit_entry);
	if (commit)
		drm_crtc_commit_get(commit);
	spin_unlock(&crtc->commit_lock);

	if (commit) {
		/*
		 * Need to wait for all outstanding programming to complete
		 * in commit tail since it can modify CRC related fields and
		 * hardware state. Since we're holding the CRTC lock we're
		 * guaranteed that no other commit work can be queued off
		 * before we modify the state below.
		 */
		ret = wait_for_completion_interruptible_timeout(
			&commit->hw_done, 10 * HZ);
		if (ret == 0)
			ret = -ETIMEDOUT;
		if (ret < 0)
			goto cleanup;
		ret = 0;
	}

	enable = amdgpu_dm_is_valid_crc_source(source);
	crtc_state = to_dm_crtc_state(crtc->state);
	spin_lock_irq(&drm_dev->event_lock);
	cur_crc_src = acrtc->dm_irq_params.crc_src;
	spin_unlock_irq(&drm_dev->event_lock);

	/*
	 * USER REQ SRC | CURRENT SRC | BEHAVIOR
	 * -----------------------------
	 * None         | None        | Do nothing
	 * None         | CRTC        | Disable CRTC CRC, set default to dither
	 * None         | DPRX        | Disable DPRX CRC, need 'aux', set default to dither
	 * None         | CRTC DITHER | Disable CRTC CRC
	 * None         | DPRX DITHER | Disable DPRX CRC, need 'aux'
	 * CRTC         | XXXX        | Enable CRTC CRC, no dither
	 * DPRX         | XXXX        | Enable DPRX CRC, need 'aux', no dither
	 * CRTC DITHER  | XXXX        | Enable CRTC CRC, set dither
	 * DPRX DITHER  | XXXX        | Enable DPRX CRC, need 'aux', set dither
	 */
	if (dm_is_crc_source_dprx(source) ||
	    (source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE &&
	     dm_is_crc_source_dprx(cur_crc_src))) {
		struct amdgpu_dm_connector *aconn = NULL;
		struct drm_connector *connector;
		struct drm_connector_list_iter conn_iter;

		drm_connector_list_iter_begin(crtc->dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (!connector->state || connector->state->crtc != crtc)
				continue;

			if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
				continue;

			aconn = to_amdgpu_dm_connector(connector);
			break;
		}
		drm_connector_list_iter_end(&conn_iter);

		if (!aconn) {
			DRM_DEBUG_DRIVER("No amd connector matching CRTC-%d\n", crtc->index);
			ret = -EINVAL;
			goto cleanup;
		}

		aux = (aconn->mst_output_port) ? &aconn->mst_output_port->aux : &aconn->dm_dp_aux.aux;

		if (!aux) {
			DRM_DEBUG_DRIVER("No dp aux for amd connector\n");
			ret = -EINVAL;
			goto cleanup;
		}

		if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
				(aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) {
			DRM_DEBUG_DRIVER("No DP connector available for CRC source\n");
			ret = -EINVAL;
			goto cleanup;
		}

	}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/* Reset secure_display when the CRC source is changed from debugfs */
	amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream);
#endif

	if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) {
		ret = -EINVAL;
		goto cleanup;
	}

	/*
	 * Reading the CRC requires the vblank interrupt handler to be
	 * enabled. Keep a reference until CRC capture stops.
	 */
	enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
	if (!enabled && enable) {
		ret = drm_crtc_vblank_get(crtc);
		if (ret)
			goto cleanup;

		if (dm_is_crc_source_dprx(source)) {
			if (drm_dp_start_crc(aux, crtc)) {
				DRM_DEBUG_DRIVER("dp start crc failed\n");
				ret = -EINVAL;
				goto cleanup;
			}
		}
	} else if (enabled && !enable) {
		drm_crtc_vblank_put(crtc);
		if (dm_is_crc_source_dprx(source)) {
			if (drm_dp_stop_crc(aux)) {
				DRM_DEBUG_DRIVER("dp stop crc failed\n");
				ret = -EINVAL;
				goto cleanup;
			}
		}
	}

	spin_lock_irq(&drm_dev->event_lock);
	acrtc->dm_irq_params.crc_src = source;
	spin_unlock_irq(&drm_dev->event_lock);

	/* Reset crc_skipped on dm state */
	crtc_state->crc_skip_count = 0;

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/* Initialize the phy id mapping table for secure display */
	if (!dm->secure_display_ctx.phy_mapping_updated)
		update_phy_id_mapping(adev);
#endif

cleanup:
	if (commit)
		drm_crtc_commit_put(commit);

	drm_modeset_unlock(&crtc->mutex);

	return ret;
}

/**
 * amdgpu_dm_crtc_handle_crc_irq - Report to DRM the CRC on the given CRTC.
 * @crtc: DRM CRTC object.
 *
 * This function should be called at the end of a vblank, when the fb has been
 * fully processed through the pipe.
 */
void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
{
	struct dm_crtc_state *crtc_state;
	struct dc_stream_state *stream_state;
	struct drm_device *drm_dev = NULL;
	enum amdgpu_dm_pipe_crc_source cur_crc_src;
	struct amdgpu_crtc *acrtc = NULL;
	uint32_t crcs[3];
	unsigned long flags;

	if (crtc == NULL)
		return;

	crtc_state = to_dm_crtc_state(crtc->state);
	stream_state = crtc_state->stream;
	acrtc = to_amdgpu_crtc(crtc);
	drm_dev = crtc->dev;

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	cur_crc_src = acrtc->dm_irq_params.crc_src;
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);

	/* Early return if CRC capture is not enabled. */
	if (!amdgpu_dm_is_valid_crc_source(cur_crc_src))
		return;

	/*
	 * Since flipping and crc enablement happen asynchronously, we - more
	 * often than not - will be returning an 'uncooked' crc on first frame.
	 * Probably because hw isn't ready yet. For added security, skip the
	 * first two CRC values.
	 */
	if (crtc_state->crc_skip_count < 2) {
		crtc_state->crc_skip_count += 1;
		return;
	}

	if (dm_is_crc_source_crtc(cur_crc_src)) {
		if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, 0,
				       &crcs[0], &crcs[1], &crcs[2]))
			return;

		drm_crtc_add_crc_entry(crtc, true,
				       drm_crtc_accurate_vblank_count(crtc), crcs);
	}
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
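/*
 * Vblank-time handler for secure display: when a CRC window is active,
 * either forward an updated ROI to DMUB or kick the work that asks the
 * PSP TA to read and transmit the ROI CRC.
 */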
void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
{
	struct drm_device *drm_dev = NULL;
	enum amdgpu_dm_pipe_crc_source cur_crc_src;
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_device *adev = NULL;
	struct secure_display_crtc_context *crtc_ctx = NULL;
	unsigned long flags1;

	if (crtc == NULL)
		return;

	acrtc = to_amdgpu_crtc(crtc);
	adev = drm_to_adev(crtc->dev);
	drm_dev = crtc->dev;

	spin_lock_irqsave(&drm_dev->event_lock, flags1);
	cur_crc_src = acrtc->dm_irq_params.crc_src;

	/* Early return if CRC capture is not enabled. */
	if (!amdgpu_dm_is_valid_crc_source(cur_crc_src) ||
		!dm_is_crc_source_crtc(cur_crc_src))
		goto cleanup;

	if (!acrtc->dm_irq_params.window_param.activated)
		goto cleanup;

	if (acrtc->dm_irq_params.window_param.skip_frame_cnt) {
		acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1;
		goto cleanup;
	}

	crtc_ctx = &adev->dm.secure_display_ctx.crtc_ctx[acrtc->crtc_id];
	if (WARN_ON(crtc_ctx->crtc != crtc)) {
		/*
		 * The crtc was set when the secure_display_crtc_context was
		 * created and is not expected to change here.
		 */
		crtc_ctx->crtc = crtc;
	}

	if (acrtc->dm_irq_params.window_param.update_win) {
		/* Prepare work for DMUB to update the ROI */
		crtc_ctx->rect.x = acrtc->dm_irq_params.window_param.x_start;
		crtc_ctx->rect.y = acrtc->dm_irq_params.window_param.y_start;
		crtc_ctx->rect.width = acrtc->dm_irq_params.window_param.x_end -
								acrtc->dm_irq_params.window_param.x_start;
		crtc_ctx->rect.height = acrtc->dm_irq_params.window_param.y_end -
								acrtc->dm_irq_params.window_param.y_start;
		schedule_work(&crtc_ctx->forward_roi_work);

		acrtc->dm_irq_params.window_param.update_win = false;

		/*
		 * Statically skip 1 frame, because we may need to wait for
		 * the following before sending the ROI to DMUB:
		 * 1. The work is deferred to the system workqueue.
		 * 2. We may need to wait for dc_lock before accessing DMUB.
		 */
		acrtc->dm_irq_params.window_param.skip_frame_cnt = 1;

	} else {
		/* Prepare work for the PSP to read the ROI CRC and send it over I2C */
		schedule_work(&crtc_ctx->notify_ta_work);
	}

cleanup:
	spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
}

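/*
 * Allocate one secure_display_crtc_context per CRTC and initialize the
 * deferred work items used to forward ROI updates to DMUB and to notify
 * the PSP TA.
 */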
void amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)
{
	struct secure_display_crtc_context *crtc_ctx = NULL;
	int i;

	crtc_ctx = kcalloc(adev->mode_info.num_crtc,
				      sizeof(struct secure_display_crtc_context),
				      GFP_KERNEL);

	if (!crtc_ctx) {
		adev->dm.secure_display_ctx.crtc_ctx = NULL;
		return;
	}

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		INIT_WORK(&crtc_ctx[i].forward_roi_work, amdgpu_dm_forward_crc_window);
		INIT_WORK(&crtc_ctx[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
		crtc_ctx[i].crtc = &adev->mode_info.crtcs[i]->base;
	}

	adev->dm.secure_display_ctx.crtc_ctx = crtc_ctx;
}
#endif
