/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "dc.h"
#include "amdgpu_securedisplay.h"

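/* CRC source names exposed to userspace through the DRM CRC debugfs API */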
static const char *const pipe_crc_sources[] = {
	"none",
	"crtc",
	"crtc dither",
	"dprx",
	"dprx dither",
	"auto",
};

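/*
 * Map a userspace CRC source name onto the internal source enum. "auto"
 * selects the CRTC source; a NULL name or "none" disables CRC generation.
 */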
static enum amdgpu_dm_pipe_crc_source dm_parse_crc_source(const char *source)
{
	if (!source || !strcmp(source, "none"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_NONE;
	if (!strcmp(source, "auto") || !strcmp(source, "crtc"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC;
	if (!strcmp(source, "dprx"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX;
	if (!strcmp(source, "crtc dither"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER;
	if (!strcmp(source, "dprx dither"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER;

	return AMDGPU_DM_PIPE_CRC_SOURCE_INVALID;
}

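/* True if the CRC is generated by the CRTC, with or without dithering */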
static bool dm_is_crc_source_crtc(enum amdgpu_dm_pipe_crc_source src)
{
	return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC) ||
	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER);
}

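/* True if the CRC is read back from the DP receiver (DPRX) over AUX */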
static bool dm_is_crc_source_dprx(enum amdgpu_dm_pipe_crc_source src)
{
	return (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX) ||
	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER);
}

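/*
 * Sources that keep the default dither and dynamic expansion settings: the
 * explicit "dither" variants and "none". All other sources force them off
 * so the CRC is computed over deterministic pixel data.
 */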
static bool dm_need_crc_dither(enum amdgpu_dm_pipe_crc_source src)
{
	return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER) ||
	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER) ||
	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_NONE);
}

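/* DRM .get_crc_sources hook: expose the table of supported source names */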
const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
						  size_t *count)
{
	*count = ARRAY_SIZE(pipe_crc_sources);
	return pipe_crc_sources;
}

#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
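/*
 * Build the table that maps a connector to the PHY id handed to the secure
 * display TA. Connected connectors are sorted by link encoder hardware
 * instance first; MST connectors sharing a root are further ordered by tree
 * depth (LCT) and relative address (RAD), so each display resolves to a
 * stable table index.
 */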
static void update_phy_id_mapping(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *sort_connector[AMDGPU_DM_MAX_CRTC] = {NULL};
	struct drm_connector_list_iter iter;
	uint8_t idx = 0, idx_2 = 0, connector_cnt = 0;

	dm->secure_display_ctx.phy_mapping_updated = false;

	mutex_lock(&ddev->mode_config.mutex);
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {

		if (connector->status != connector_status_connected)
			continue;

		if (idx >= AMDGPU_DM_MAX_CRTC) {
			DRM_WARN("%s: connected connectors exceed max CRTC count\n", __func__);
			drm_connector_list_iter_end(&iter);
			mutex_unlock(&ddev->mode_config.mutex);
			return;
		}

		aconnector = to_amdgpu_dm_connector(connector);

		sort_connector[idx] = aconnector;
		idx++;
		connector_cnt++;
	}
	drm_connector_list_iter_end(&iter);
	/* sort connectors by link_enc_hw_instance first */
	for (idx = connector_cnt; idx > 1; idx--) {
		for (idx_2 = 0; idx_2 < (idx - 1); idx_2++) {
			if (sort_connector[idx_2]->dc_link->link_enc_hw_inst >
			    sort_connector[idx_2 + 1]->dc_link->link_enc_hw_inst)
				swap(sort_connector[idx_2], sort_connector[idx_2 + 1]);
		}
	}

	/*
	 * Sort MST connectors by RAD. MST connectors with the same link
	 * encoder hardware instance were already grouped together by the
	 * sort above.
	 */
	for (idx = 0; idx < connector_cnt; /* do nothing */) {
		if (sort_connector[idx]->mst_root) {
			uint8_t i, j, k;
			uint8_t mst_con_cnt = 1;

			for (idx_2 = (idx + 1); idx_2 < connector_cnt; idx_2++) {
				if (sort_connector[idx_2]->mst_root == sort_connector[idx]->mst_root)
					mst_con_cnt++;
				else
					break;
			}

			for (i = mst_con_cnt; i > 1; i--) {
				for (j = idx; j < (idx + i - 1); j++) {
					int mstb_lct = sort_connector[j]->mst_output_port->parent->lct;
					int next_mstb_lct = sort_connector[j + 1]->mst_output_port->parent->lct;
					u8 *rad;
					u8 *next_rad;
					bool need_swap = false;

					/* Sort by MST tree depth first, then compare RAD if the depth is the same. */
					if (mstb_lct > next_mstb_lct) {
						need_swap = true;
					} else if (mstb_lct == next_mstb_lct) {
						if (mstb_lct == 1) {
							if (sort_connector[j]->mst_output_port->port_num > sort_connector[j + 1]->mst_output_port->port_num)
								need_swap = true;
						} else if (mstb_lct > 1) {
							rad = sort_connector[j]->mst_output_port->parent->rad;
							next_rad = sort_connector[j + 1]->mst_output_port->parent->rad;

							/* RAD packs one 4-bit port number per hop, two hops per byte, high nibble first */
							for (k = 0; k < mstb_lct - 1; k++) {
								int shift = (k % 2) ? 0 : 4;
								int port_num = (rad[k / 2] >> shift) & 0xf;
								int next_port_num = (next_rad[k / 2] >> shift) & 0xf;

								if (port_num > next_port_num) {
									need_swap = true;
									break;
								}
								/* stop at the first hop that differs */
								if (port_num < next_port_num)
									break;
							}
						} else {
							DRM_ERROR("MST LCT should never be less than 1\n");
							mutex_unlock(&ddev->mode_config.mutex);
							return;
						}
					}

					if (need_swap)
						swap(sort_connector[j], sort_connector[j + 1]);
				}
			}

			idx += mst_con_cnt;
		} else {
			idx++;
		}
	}

	/* Sorting complete. Store the result in dm->secure_display_ctx.phy_id_mapping[]. */
	memset(dm->secure_display_ctx.phy_id_mapping, 0, sizeof(dm->secure_display_ctx.phy_id_mapping));
	for (idx = 0; idx < connector_cnt; idx++) {
		aconnector = sort_connector[idx];

		dm->secure_display_ctx.phy_id_mapping[idx].assigned = true;
		dm->secure_display_ctx.phy_id_mapping[idx].is_mst = false;
		dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst = aconnector->dc_link->link_enc_hw_inst;

		if (sort_connector[idx]->mst_root) {
			dm->secure_display_ctx.phy_id_mapping[idx].is_mst = true;
			dm->secure_display_ctx.phy_id_mapping[idx].lct = aconnector->mst_output_port->parent->lct;
			dm->secure_display_ctx.phy_id_mapping[idx].port_num = aconnector->mst_output_port->port_num;
			memcpy(dm->secure_display_ctx.phy_id_mapping[idx].rad,
				aconnector->mst_output_port->parent->rad, sizeof(aconnector->mst_output_port->parent->rad));
		}
	}
	mutex_unlock(&ddev->mode_config.mutex);

	dm->secure_display_ctx.phy_id_mapping_cnt = connector_cnt;
	dm->secure_display_ctx.phy_mapping_updated = true;
}

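/*
 * Look up the PHY id assigned to @aconnector by update_phy_id_mapping().
 * For MST connectors the match is confirmed by comparing LCT, port number
 * and RAD. Returns true and stores the id in @phy_id on success.
 */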
static bool get_phy_id(struct amdgpu_display_manager *dm,
			struct amdgpu_dm_connector *aconnector, uint8_t *phy_id)
{
	int idx, idx_2;
	bool found = false;

	/*
	 * Assume secure display starts after all connectors have been probed
	 * and that the connection configuration is static as well.
	 */
	if (!dm->secure_display_ctx.phy_mapping_updated) {
		DRM_WARN("%s: the phy id table must be updated before its value is read\n", __func__);
		return false;
	}
	for (idx = 0; idx < dm->secure_display_ctx.phy_id_mapping_cnt; idx++) {
		if (!dm->secure_display_ctx.phy_id_mapping[idx].assigned) {
			DRM_ERROR("phy_id_mapping[%d] should be assigned\n", idx);
			return false;
		}

		if (aconnector->dc_link->link_enc_hw_inst ==
				dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst) {
			if (!dm->secure_display_ctx.phy_id_mapping[idx].is_mst) {
				found = true;
				goto out;
			} else {
				/* Could be caused by mistakenly passing in the MST root connector */
				if (!aconnector->mst_output_port) {
					DRM_ERROR("%s: MST connector has no output port assigned\n", __func__);
					return false;
				}

				if (aconnector->mst_root &&
					aconnector->mst_root->mst_mgr.mst_primary == NULL) {
					DRM_WARN("%s: a stale MST connector was passed in\n", __func__);
				}

				if (aconnector->mst_output_port->parent->lct == dm->secure_display_ctx.phy_id_mapping[idx].lct &&
					aconnector->mst_output_port->port_num == dm->secure_display_ctx.phy_id_mapping[idx].port_num) {
					if (aconnector->mst_output_port->parent->lct == 1) {
						found = true;
						goto out;
					} else if (aconnector->mst_output_port->parent->lct > 1) {
						/* Check that the RAD matches, one 4-bit port number per hop */
						for (idx_2 = 0; idx_2 < aconnector->mst_output_port->parent->lct - 1; idx_2++) {
							int shift = (idx_2 % 2) ? 0 : 4;
							int port_num = (aconnector->mst_output_port->parent->rad[idx_2 / 2] >> shift) & 0xf;
							int port_num2 = (dm->secure_display_ctx.phy_id_mapping[idx].rad[idx_2 / 2] >> shift) & 0xf;

							if (port_num != port_num2)
								break;
						}

						if (idx_2 == aconnector->mst_output_port->parent->lct - 1) {
							found = true;
							goto out;
						}
					} else {
						DRM_ERROR("LCT should be >= 1\n");
						return false;
					}
				}
			}
		}
	}

out:
	if (found) {
		DRM_DEBUG_DRIVER("Associated secure display PHY ID is %d\n", idx);
		*phy_id = idx;
	} else {
		DRM_WARN("Can't find associated phy ID\n");
		return false;
	}

	return true;
}

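/*
 * Reset all CRC window parameters on @crtc to their defaults and, if secure
 * display was active in legacy mode, tell DC to stop forwarding ROI CRC
 * windows for the stream.
 */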
static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc, struct dc_stream_state *stream)
{
	struct drm_device *drm_dev = crtc->dev;
	struct amdgpu_display_manager *dm = &drm_to_adev(drm_dev)->dm;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_dm_connector *aconnector;
	bool was_activated;
	uint8_t phy_id;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	was_activated = acrtc->dm_irq_params.crc_window_activated;
	for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
		acrtc->dm_irq_params.window_param[i].x_start = 0;
		acrtc->dm_irq_params.window_param[i].y_start = 0;
		acrtc->dm_irq_params.window_param[i].x_end = 0;
		acrtc->dm_irq_params.window_param[i].y_end = 0;
		acrtc->dm_irq_params.window_param[i].enable = false;
		acrtc->dm_irq_params.window_param[i].update_win = false;
		acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 0;
	}
	acrtc->dm_irq_params.crc_window_activated = false;
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);

	/* Disable secure_display if it was enabled */
	if (was_activated && dm->secure_display_ctx.op_mode == LEGACY_MODE) {
		/* stop ROI update on this crtc */
		flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].notify_ta_work);
		flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].forward_roi_work);
		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (aconnector && get_phy_id(dm, aconnector, &phy_id)) {
			if (dm->secure_display_ctx.support_mul_roi)
				dc_stream_forward_multiple_crc_window(stream, NULL, phy_id, true);
			else
				dc_stream_forward_crc_window(stream, NULL, phy_id, true);
		} else {
			DRM_DEBUG_DRIVER("%s: can't find matching phy id\n", __func__);
		}
	}
}

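/*
 * Deferred work: snapshot the latest per-window CRCs and hand them to the
 * PSP secure display TA, which sends them out over I2C. Uses the V2 command
 * when multiple ROIs are supported.
 */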
static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
{
	struct secure_display_crtc_context *crtc_ctx;
	struct psp_context *psp;
	struct ta_securedisplay_cmd *securedisplay_cmd;
	struct drm_crtc *crtc;
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector;
	uint8_t phy_inst;
	struct amdgpu_display_manager *dm;
	struct crc_data crc_cpy[MAX_CRC_WINDOW_NUM];
	unsigned long flags;
	uint8_t roi_idx = 0;
	int ret;
	int i;

	crtc_ctx = container_of(work, struct secure_display_crtc_context, notify_ta_work);
	crtc = crtc_ctx->crtc;

	if (!crtc)
		return;

	psp = &drm_to_adev(crtc->dev)->psp;

	if (!psp->securedisplay_context.context.initialized) {
		DRM_DEBUG_DRIVER("Secure display: PSP TA is not initialized, cannot notify it\n");
		return;
	}

	dm = &drm_to_adev(crtc->dev)->dm;
	stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
	if (!aconnector)
		return;

	mutex_lock(&crtc->dev->mode_config.mutex);
	if (!get_phy_id(dm, aconnector, &phy_inst)) {
		DRM_WARN("%s: can't find mapping phy id!\n", __func__);
		mutex_unlock(&crtc->dev->mode_config.mutex);
		return;
	}
	mutex_unlock(&crtc->dev->mode_config.mutex);

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	memcpy(crc_cpy, crtc_ctx->crc_info.crc, sizeof(struct crc_data) * MAX_CRC_WINDOW_NUM);
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	/* Need the lock since multiple CRTCs share the TA command buffer */
	mutex_lock(&psp->securedisplay_context.mutex);
	/*
	 * The PSP TA is expected to finish data transmission over I2C within the
	 * current frame, even if up to 4 CRTCs request to send in this frame.
	 */
	if (dm->secure_display_ctx.support_mul_roi) {
		psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
							TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2);

		securedisplay_cmd->securedisplay_in_message.send_roi_crc_v2.phy_id = phy_inst;

		/* Report the windows with a ready CRC as a bitmask */
		for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
			if (crc_cpy[i].crc_ready)
				roi_idx |= 1 << i;
		}
		securedisplay_cmd->securedisplay_in_message.send_roi_crc_v2.roi_idx = roi_idx;

		ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2);
	} else {
		psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
							TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);

		securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst;

		ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
	}

	if (!ret) {
		if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS)
			psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
	}

	mutex_unlock(&psp->securedisplay_context.mutex);
}

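/*
 * Deferred work: push the user-configured CRC window(s) for this CRTC down
 * to DC so the hardware computes CRCs over the new ROI.
 */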
static void
amdgpu_dm_forward_crc_window(struct work_struct *work)
{
	struct secure_display_crtc_context *crtc_ctx;
	struct amdgpu_display_manager *dm;
	struct drm_crtc *crtc;
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector;
	struct crc_window roi_cpy[MAX_CRC_WINDOW_NUM];
	unsigned long flags;
	uint8_t phy_id;

	crtc_ctx = container_of(work, struct secure_display_crtc_context, forward_roi_work);
	crtc = crtc_ctx->crtc;

	if (!crtc)
		return;

	dm = &drm_to_adev(crtc->dev)->dm;
	stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector)
		return;

	mutex_lock(&crtc->dev->mode_config.mutex);
	if (!get_phy_id(dm, aconnector, &phy_id)) {
		DRM_WARN("%s: can't find mapping phy id!\n", __func__);
		mutex_unlock(&crtc->dev->mode_config.mutex);
		return;
	}
	mutex_unlock(&crtc->dev->mode_config.mutex);

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	memcpy(roi_cpy, crtc_ctx->roi, sizeof(struct crc_window) * MAX_CRC_WINDOW_NUM);
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	mutex_lock(&dm->dc_lock);
	if (dm->secure_display_ctx.support_mul_roi)
		dc_stream_forward_multiple_crc_window(stream, roi_cpy,
			phy_id, false);
	else
		dc_stream_forward_crc_window(stream, &roi_cpy[0].rect,
			phy_id, false);
	mutex_unlock(&dm->dc_lock);
}

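/* Report, under the event lock, whether any CRC window is active on @crtc */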
bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc)
{
	struct drm_device *drm_dev = crtc->dev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	bool ret = false;

	spin_lock_irq(&drm_dev->event_lock);
	ret = acrtc->dm_irq_params.crc_window_activated;
	spin_unlock_irq(&drm_dev->event_lock);

	return ret;
}
#endif

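/*
 * DRM .verify_crc_source hook: validate a source name coming from debugfs
 * and report that each CRC entry carries three values (R, G and B).
 */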
int
amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
				 size_t *values_cnt)
{
	enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);

	if (source < 0) {
		DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",
				 src_name, crtc->index);
		return -EINVAL;
	}

	*values_cnt = 3;
	return 0;
}

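/*
 * Program the stream for the requested CRC source: enable or disable CRC
 * generation on the CRTC and select the dither and dynamic expansion
 * settings that match the source.
 */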
int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
					struct dm_crtc_state *dm_crtc_state,
					enum amdgpu_dm_pipe_crc_source source)
{
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dc_stream_state *stream_state = dm_crtc_state->stream;
	bool enable = amdgpu_dm_is_valid_crc_source(source);
	int ret = 0;

	/* Configuration will be deferred to stream enable. */
	if (!stream_state)
		return -EINVAL;

	mutex_lock(&adev->dm.dc_lock);

	/* Enable or disable CRTC CRC generation */
	if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
		if (!dc_stream_configure_crc(stream_state->ctx->dc,
					     stream_state, NULL, enable, enable, 0, true)) {
			ret = -EINVAL;
			goto unlock;
		}
	}

	/* Configure dithering */
	if (!dm_need_crc_dither(source)) {
		dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8);
		dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
					    DYN_EXPANSION_DISABLE);
	} else {
		dc_stream_set_dither_option(stream_state,
					    DITHER_OPTION_DEFAULT);
		dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
					    DYN_EXPANSION_AUTO);
	}

unlock:
	mutex_unlock(&adev->dm.dc_lock);

	return ret;
}

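/*
 * DRM .set_crc_source hook: switch CRC capture between the none, CRTC and
 * DPRX sources, managing the vblank reference and the DP AUX CRC state,
 * and reset any secure display window configuration.
 */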
int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
	enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);
	enum amdgpu_dm_pipe_crc_source cur_crc_src;
	struct drm_crtc_commit *commit;
	struct dm_crtc_state *crtc_state;
	struct drm_device *drm_dev = crtc->dev;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	struct amdgpu_device *adev = drm_to_adev(drm_dev);
	struct amdgpu_display_manager *dm = &adev->dm;
#endif
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct drm_dp_aux *aux = NULL;
	bool enable = false;
	bool enabled = false;
	int ret = 0;

	if (source < 0) {
		DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",
				 src_name, crtc->index);
		return -EINVAL;
	}

	ret = drm_modeset_lock(&crtc->mutex, NULL);
	if (ret)
		return ret;

	spin_lock(&crtc->commit_lock);
	commit = list_first_entry_or_null(&crtc->commit_list,
					  struct drm_crtc_commit, commit_entry);
	if (commit)
		drm_crtc_commit_get(commit);
	spin_unlock(&crtc->commit_lock);

	if (commit) {
		/*
		 * Need to wait for all outstanding programming to complete
		 * in commit tail since it can modify CRC related fields and
		 * hardware state. Since we're holding the CRTC lock we're
		 * guaranteed that no other commit work can be queued off
		 * before we modify the state below.
		 */
		ret = wait_for_completion_interruptible_timeout(
			&commit->hw_done, 10 * HZ);
		if (ret < 0)
			goto cleanup;
		if (ret == 0) {
			/* wait_for_completion_*_timeout() returns 0 on timeout */
			ret = -ETIMEDOUT;
			goto cleanup;
		}
		ret = 0;
	}

	enable = amdgpu_dm_is_valid_crc_source(source);
	crtc_state = to_dm_crtc_state(crtc->state);
	spin_lock_irq(&drm_dev->event_lock);
	cur_crc_src = acrtc->dm_irq_params.crc_src;
	spin_unlock_irq(&drm_dev->event_lock);

	/*
	 * USER REQ SRC | CURRENT SRC | BEHAVIOR
	 * -------------+-------------+-----------------------------------------------------
	 * None         | None        | Do nothing
	 * None         | CRTC        | Disable CRTC CRC, set default to dither
	 * None         | DPRX        | Disable DPRX CRC, need 'aux', set default to dither
	 * None         | CRTC DITHER | Disable CRTC CRC
	 * None         | DPRX DITHER | Disable DPRX CRC, need 'aux'
	 * CRTC         | XXXX        | Enable CRTC CRC, no dither
	 * DPRX         | XXXX        | Enable DPRX CRC, need 'aux', no dither
	 * CRTC DITHER  | XXXX        | Enable CRTC CRC, set dither
	 * DPRX DITHER  | XXXX        | Enable DPRX CRC, need 'aux', set dither
	 */
	if (dm_is_crc_source_dprx(source) ||
	    (source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE &&
	     dm_is_crc_source_dprx(cur_crc_src))) {
		struct amdgpu_dm_connector *aconn = NULL;
		struct drm_connector *connector;
		struct drm_connector_list_iter conn_iter;

		drm_connector_list_iter_begin(crtc->dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (!connector->state || connector->state->crtc != crtc)
				continue;

			if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
				continue;

			aconn = to_amdgpu_dm_connector(connector);
			break;
		}
		drm_connector_list_iter_end(&conn_iter);

		if (!aconn) {
			DRM_DEBUG_DRIVER("No amd connector matching CRTC-%d\n", crtc->index);
			ret = -EINVAL;
			goto cleanup;
		}

		aux = (aconn->mst_output_port) ? &aconn->mst_output_port->aux : &aconn->dm_dp_aux.aux;

		if (!aux) {
			DRM_DEBUG_DRIVER("No dp aux for amd connector\n");
			ret = -EINVAL;
			goto cleanup;
		}

		if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
				(aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) {
			DRM_DEBUG_DRIVER("No DP connector available for CRC source\n");
			ret = -EINVAL;
			goto cleanup;
		}
	}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/* Reset secure_display when we change crc source from debugfs */
	amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream);
#endif

	if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) {
		ret = -EINVAL;
		goto cleanup;
	}

	/*
	 * Reading the CRC requires the vblank interrupt handler to be
	 * enabled. Keep a reference until CRC capture stops.
	 */
	enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
	if (!enabled && enable) {
		ret = drm_crtc_vblank_get(crtc);
		if (ret)
			goto cleanup;

		if (dm_is_crc_source_dprx(source)) {
			if (drm_dp_start_crc(aux, crtc)) {
				DRM_DEBUG_DRIVER("dp start crc failed\n");
				ret = -EINVAL;
				goto cleanup;
			}
		}
	} else if (enabled && !enable) {
		drm_crtc_vblank_put(crtc);
		/* Stop DPRX CRC on the source we are switching away from */
		if (dm_is_crc_source_dprx(cur_crc_src)) {
			if (drm_dp_stop_crc(aux)) {
				DRM_DEBUG_DRIVER("dp stop crc failed\n");
				ret = -EINVAL;
				goto cleanup;
			}
		}
	}

	spin_lock_irq(&drm_dev->event_lock);
	acrtc->dm_irq_params.crc_src = source;
	spin_unlock_irq(&drm_dev->event_lock);

	/* Reset crc_skipped on dm state */
	crtc_state->crc_skip_count = 0;

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/* Initialize the phy id mapping table for secure display */
	if (dm->secure_display_ctx.op_mode == LEGACY_MODE &&
		!dm->secure_display_ctx.phy_mapping_updated)
		update_phy_id_mapping(adev);
#endif

cleanup:
	if (commit)
		drm_crtc_commit_put(commit);

	drm_modeset_unlock(&crtc->mutex);

	return ret;
}

/**
 * amdgpu_dm_crtc_handle_crc_irq - Report to DRM the CRC on given CRTC
 * @crtc: DRM CRTC object.
 *
 * This function should be called at the end of a vblank, when the fb has been
 * fully processed through the pipe.
 */
void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
{
	struct dm_crtc_state *crtc_state;
	struct dc_stream_state *stream_state;
	struct drm_device *drm_dev = NULL;
	enum amdgpu_dm_pipe_crc_source cur_crc_src;
	struct amdgpu_crtc *acrtc = NULL;
	uint32_t crcs[3];
	unsigned long flags;

	if (crtc == NULL)
		return;

	crtc_state = to_dm_crtc_state(crtc->state);
	stream_state = crtc_state->stream;
	acrtc = to_amdgpu_crtc(crtc);
	drm_dev = crtc->dev;

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	cur_crc_src = acrtc->dm_irq_params.crc_src;
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);

	/* Early return if CRC capture is not enabled. */
	if (!amdgpu_dm_is_valid_crc_source(cur_crc_src))
		return;

	/*
	 * Since flipping and crc enablement happen asynchronously, we more
	 * often than not will be returning an 'uncooked' crc on the first
	 * frame, probably because the hw isn't ready yet. To be safe, skip
	 * the first two CRC values.
	 */
	if (crtc_state->crc_skip_count < 2) {
		crtc_state->crc_skip_count += 1;
		return;
	}

	if (dm_is_crc_source_crtc(cur_crc_src)) {
		if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, 0,
				       &crcs[0], &crcs[1], &crcs[2]))
			return;

		drm_crtc_add_crc_entry(crtc, true,
				       drm_crtc_accurate_vblank_count(crtc), crcs);
	}
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
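/*
 * Per-vblank secure display handler: program updated ROI windows, read back
 * per-window CRCs, and schedule the deferred work that forwards ROIs to DC
 * or notifies the PSP TA, depending on the operating mode.
 */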
void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
{
	struct drm_device *drm_dev = NULL;
	enum amdgpu_dm_pipe_crc_source cur_crc_src;
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_device *adev = NULL;
	struct secure_display_crtc_context *crtc_ctx = NULL;
	bool reset_crc_frame_count[MAX_CRC_WINDOW_NUM] = {false};
	uint32_t crc_r[MAX_CRC_WINDOW_NUM] = {0};
	uint32_t crc_g[MAX_CRC_WINDOW_NUM] = {0};
	uint32_t crc_b[MAX_CRC_WINDOW_NUM] = {0};
	unsigned long flags1;
	bool forward_roi_change = false;
	bool notify_ta = false;
	bool all_crc_ready = true;
	struct dc_stream_state *stream_state;
	int i;

	if (crtc == NULL)
		return;

	acrtc = to_amdgpu_crtc(crtc);
	adev = drm_to_adev(crtc->dev);
	drm_dev = crtc->dev;
	stream_state = to_dm_crtc_state(crtc->state)->stream;

	spin_lock_irqsave(&drm_dev->event_lock, flags1);
	cur_crc_src = acrtc->dm_irq_params.crc_src;

	/* Early return if CRC capture is not enabled. */
	if (!amdgpu_dm_is_valid_crc_source(cur_crc_src) ||
	    !dm_is_crc_source_crtc(cur_crc_src)) {
		spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
		return;
	}

	if (!acrtc->dm_irq_params.crc_window_activated) {
		spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
		return;
	}

	crtc_ctx = &adev->dm.secure_display_ctx.crtc_ctx[acrtc->crtc_id];
	if (WARN_ON(crtc_ctx->crtc != crtc)) {
		/* The crtc was set when the secure_display_crtc_context was
		 * created; we don't expect it to change here.
		 */
		crtc_ctx->crtc = crtc;
	}

	for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
		struct crc_params crc_window = {
			.windowa_x_start = acrtc->dm_irq_params.window_param[i].x_start,
			.windowa_y_start = acrtc->dm_irq_params.window_param[i].y_start,
			.windowa_x_end = acrtc->dm_irq_params.window_param[i].x_end,
			.windowa_y_end = acrtc->dm_irq_params.window_param[i].y_end,
			.windowb_x_start = acrtc->dm_irq_params.window_param[i].x_start,
			.windowb_y_start = acrtc->dm_irq_params.window_param[i].y_start,
			.windowb_x_end = acrtc->dm_irq_params.window_param[i].x_end,
			.windowb_y_end = acrtc->dm_irq_params.window_param[i].y_end,
		};

		crtc_ctx->roi[i].enable = acrtc->dm_irq_params.window_param[i].enable;

		if (!acrtc->dm_irq_params.window_param[i].enable) {
			crtc_ctx->crc_info.crc[i].crc_ready = false;
			continue;
		}

		if (acrtc->dm_irq_params.window_param[i].skip_frame_cnt) {
			acrtc->dm_irq_params.window_param[i].skip_frame_cnt -= 1;
			crtc_ctx->crc_info.crc[i].crc_ready = false;
			continue;
		}

		if (acrtc->dm_irq_params.window_param[i].update_win) {
			crtc_ctx->roi[i].rect.x = crc_window.windowa_x_start;
			crtc_ctx->roi[i].rect.y = crc_window.windowa_y_start;
			crtc_ctx->roi[i].rect.width = crc_window.windowa_x_end -
						crc_window.windowa_x_start;
			crtc_ctx->roi[i].rect.height = crc_window.windowa_y_end -
						crc_window.windowa_y_start;

			if (adev->dm.secure_display_ctx.op_mode == LEGACY_MODE)
				/* forward task to dmub to update ROI */
				forward_roi_change = true;
			else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE)
				/* update ROI via dm */
				dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
					&crc_window, true, true, i, false);

			reset_crc_frame_count[i] = true;

			acrtc->dm_irq_params.window_param[i].update_win = false;

			/* Statically skip 1 frame, because we may need to wait for the
			 * following before sending the ROI to dmub:
			 * 1. We defer the work by using the system workqueue.
			 * 2. We may need to wait for dc_lock before accessing dmub.
			 */
			acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 1;
			crtc_ctx->crc_info.crc[i].crc_ready = false;
		} else {
			if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, i,
						&crc_r[i], &crc_g[i], &crc_b[i]))
				DRM_ERROR("Secure display: failed to get CRC from engine %d\n", i);

			if (adev->dm.secure_display_ctx.op_mode == LEGACY_MODE)
				/* forward task to psp to read ROI/CRC and output via I2C */
				notify_ta = true;
			else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE)
				/* keep overwriting the ROI window so it cannot be changed */
				dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
						&crc_window, true, true, i, false);

			/* crc ready for psp to read out */
			crtc_ctx->crc_info.crc[i].crc_ready = true;
		}
	}

	spin_unlock_irqrestore(&drm_dev->event_lock, flags1);

	if (forward_roi_change)
		schedule_work(&crtc_ctx->forward_roi_work);

	if (notify_ta)
		schedule_work(&crtc_ctx->notify_ta_work);

	spin_lock_irqsave(&crtc_ctx->crc_info.lock, flags1);
	for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
		crtc_ctx->crc_info.crc[i].crc_R = crc_r[i];
		crtc_ctx->crc_info.crc[i].crc_G = crc_g[i];
		crtc_ctx->crc_info.crc[i].crc_B = crc_b[i];

		if (!crtc_ctx->roi[i].enable) {
			crtc_ctx->crc_info.crc[i].frame_count = 0;
			continue;
		}

		if (!crtc_ctx->crc_info.crc[i].crc_ready)
			all_crc_ready = false;

		/* Reset the reference frame count after the user updates the
		 * ROI or when it reaches the maximum value.
		 */
		if (reset_crc_frame_count[i] || crtc_ctx->crc_info.crc[i].frame_count == UINT_MAX)
			crtc_ctx->crc_info.crc[i].frame_count = 0;
		else
			crtc_ctx->crc_info.crc[i].frame_count += 1;
	}
	spin_unlock_irqrestore(&crtc_ctx->crc_info.lock, flags1);

	if (all_crc_ready)
		complete_all(&crtc_ctx->crc_info.completion);
}

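/*
 * Allocate and initialize one secure display context per CRTC, wiring up
 * the deferred work items used to forward ROIs and notify the PSP TA.
 */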
void amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)
{
	struct secure_display_crtc_context *crtc_ctx = NULL;
	int i;

	crtc_ctx = kcalloc(adev->mode_info.num_crtc,
				      sizeof(struct secure_display_crtc_context),
				      GFP_KERNEL);

	if (!crtc_ctx) {
		adev->dm.secure_display_ctx.crtc_ctx = NULL;
		return;
	}

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		INIT_WORK(&crtc_ctx[i].forward_roi_work, amdgpu_dm_forward_crc_window);
		INIT_WORK(&crtc_ctx[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
		crtc_ctx[i].crtc = &adev->mode_info.crtcs[i]->base;
		spin_lock_init(&crtc_ctx[i].crc_info.lock);
	}

	adev->dm.secure_display_ctx.crtc_ctx = crtc_ctx;

	adev->dm.secure_display_ctx.op_mode = DISPLAY_CRC_MODE;
}
#endif