/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "dc.h"
#include "amdgpu_securedisplay.h"

static const char *const pipe_crc_sources[] = {
	"none",
	"crtc",
	"crtc dither",
	"dprx",
	"dprx dither",
	"auto",
};

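/*
 * Map a debugfs CRC source name onto the internal source enum. Note that
 * "auto" is treated as an alias for "crtc", and any unrecognized name maps
 * to AMDGPU_DM_PIPE_CRC_SOURCE_INVALID.
 */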
static enum amdgpu_dm_pipe_crc_source dm_parse_crc_source(const char *source)
{
	if (!source || !strcmp(source, "none"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_NONE;
	if (!strcmp(source, "auto") || !strcmp(source, "crtc"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC;
	if (!strcmp(source, "dprx"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX;
	if (!strcmp(source, "crtc dither"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER;
	if (!strcmp(source, "dprx dither"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER;

	return AMDGPU_DM_PIPE_CRC_SOURCE_INVALID;
}

static bool dm_is_crc_source_crtc(enum amdgpu_dm_pipe_crc_source src)
{
	return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC) ||
	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER);
}

static bool dm_is_crc_source_dprx(enum amdgpu_dm_pipe_crc_source src)
{
	return (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX) ||
	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER);
}

static bool dm_need_crc_dither(enum amdgpu_dm_pipe_crc_source src)
{
	return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER) ||
	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER) ||
	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_NONE);
}

const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
						  size_t *count)
{
	*count = ARRAY_SIZE(pipe_crc_sources);
	return pipe_crc_sources;
}

#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
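/*
 * Build a deterministic connector -> PHY ID mapping for the secure display
 * TA. Connected connectors are collected and sorted by link encoder HW
 * instance; MST connectors that share an encoder are further ordered by
 * branch depth (LCT) and relative address (RAD), so repeated runs over the
 * same topology assign the same PHY IDs.
 */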
static void update_phy_id_mapping(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *sort_connector[AMDGPU_DM_MAX_CRTC] = {NULL};
	struct drm_connector_list_iter iter;
	uint8_t idx = 0, idx_2 = 0, connector_cnt = 0;

	dm->secure_display_ctx.phy_mapping_updated = false;

	mutex_lock(&ddev->mode_config.mutex);
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {

		if (connector->status != connector_status_connected)
			continue;

		if (idx >= AMDGPU_DM_MAX_CRTC) {
			DRM_WARN("%s connected connectors exceed max crtc\n", __func__);
			drm_connector_list_iter_end(&iter);
			mutex_unlock(&ddev->mode_config.mutex);
			return;
		}

		aconnector = to_amdgpu_dm_connector(connector);

		sort_connector[idx] = aconnector;
		idx++;
		connector_cnt++;
	}
	drm_connector_list_iter_end(&iter);

	/* Sort connectors by link_enc_hw_inst first (simple bubble sort) */
	for (idx = connector_cnt; idx > 1; idx--) {
		for (idx_2 = 0; idx_2 < (idx - 1); idx_2++) {
			if (sort_connector[idx_2]->dc_link->link_enc_hw_inst >
			    sort_connector[idx_2 + 1]->dc_link->link_enc_hw_inst)
				swap(sort_connector[idx_2], sort_connector[idx_2 + 1]);
		}
	}

	/*
	 * Sort MST connectors by RAD. MST connectors with the same
	 * link_enc_hw_inst are already grouped together by the sort above.
	 */
	for (idx = 0; idx < connector_cnt; /* idx advances below */) {
		if (sort_connector[idx]->mst_root) {
			uint8_t i, j, k;
			uint8_t mst_con_cnt = 1;

			/* Count the MST connectors sharing this root */
			for (idx_2 = (idx + 1); idx_2 < connector_cnt; idx_2++) {
				if (sort_connector[idx_2]->mst_root == sort_connector[idx]->mst_root)
					mst_con_cnt++;
				else
					break;
			}

			for (i = mst_con_cnt; i > 1; i--) {
				for (j = idx; j < (idx + i - 1); j++) {
					int mstb_lct = sort_connector[j]->mst_output_port->parent->lct;
					int next_mstb_lct = sort_connector[j + 1]->mst_output_port->parent->lct;
					u8 *rad;
					u8 *next_rad;
					bool need_swap = false;

					/* Sort by MST tree depth first, then compare RADs when the depth is the same */
					if (mstb_lct > next_mstb_lct) {
						need_swap = true;
					} else if (mstb_lct == next_mstb_lct) {
						if (mstb_lct == 1) {
							if (sort_connector[j]->mst_output_port->port_num > sort_connector[j + 1]->mst_output_port->port_num)
								need_swap = true;
						} else if (mstb_lct > 1) {
							rad = sort_connector[j]->mst_output_port->parent->rad;
							next_rad = sort_connector[j + 1]->mst_output_port->parent->rad;

							/* A RAD packs one 4-bit port number per hop: entry k sits in byte k/2, high nibble first */
							for (k = 0; k < mstb_lct - 1; k++) {
								int shift = (k % 2) ? 0 : 4;
								int port_num = (rad[k / 2] >> shift) & 0xf;
								int next_port_num = (next_rad[k / 2] >> shift) & 0xf;

								if (port_num > next_port_num) {
									need_swap = true;
									break;
								}
							}
						} else {
							DRM_ERROR("MST LCT should not be < 1\n");
							mutex_unlock(&ddev->mode_config.mutex);
							return;
						}
					}

					if (need_swap)
						swap(sort_connector[j], sort_connector[j + 1]);
				}
			}

			idx += mst_con_cnt;
		} else {
			idx++;
		}
	}

	/* Sorting complete. Record the result in dm->secure_display_ctx.phy_id_mapping[] */
	memset(dm->secure_display_ctx.phy_id_mapping, 0, sizeof(dm->secure_display_ctx.phy_id_mapping));
	for (idx = 0; idx < connector_cnt; idx++) {
		aconnector = sort_connector[idx];

		dm->secure_display_ctx.phy_id_mapping[idx].assigned = true;
		dm->secure_display_ctx.phy_id_mapping[idx].is_mst = false;
		dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst = aconnector->dc_link->link_enc_hw_inst;

		if (sort_connector[idx]->mst_root) {
			dm->secure_display_ctx.phy_id_mapping[idx].is_mst = true;
			dm->secure_display_ctx.phy_id_mapping[idx].lct = aconnector->mst_output_port->parent->lct;
			dm->secure_display_ctx.phy_id_mapping[idx].port_num = aconnector->mst_output_port->port_num;
			memcpy(dm->secure_display_ctx.phy_id_mapping[idx].rad,
				aconnector->mst_output_port->parent->rad, sizeof(aconnector->mst_output_port->parent->rad));
		}
	}
	mutex_unlock(&ddev->mode_config.mutex);

	dm->secure_display_ctx.phy_id_mapping_cnt = connector_cnt;
	dm->secure_display_ctx.phy_mapping_updated = true;
}

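/*
 * Look up the PHY ID that update_phy_id_mapping() assigned to @aconnector.
 * SST links are matched on link encoder HW instance alone; MST links are
 * additionally matched on branch depth (LCT), port number and RAD.
 */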
static bool get_phy_id(struct amdgpu_display_manager *dm,
			struct amdgpu_dm_connector *aconnector, uint8_t *phy_id)
{
	int idx, idx_2;
	bool found = false;

	/*
	 * Assume secure display starts after all connectors are probed and
	 * that the connection config is static as well.
	 */
	if (!dm->secure_display_ctx.phy_mapping_updated) {
		DRM_WARN("%s The phy id table should be updated before reading it\n", __func__);
		return false;
	}

	for (idx = 0; idx < dm->secure_display_ctx.phy_id_mapping_cnt; idx++) {
		if (!dm->secure_display_ctx.phy_id_mapping[idx].assigned) {
			DRM_ERROR("phy_id_mapping[%d] should be assigned\n", idx);
			return false;
		}

		if (aconnector->dc_link->link_enc_hw_inst ==
				dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst) {
			if (!dm->secure_display_ctx.phy_id_mapping[idx].is_mst) {
				found = true;
				goto out;
			} else {
				/* Could be caused by wrongly passing an mst root connector */
				if (!aconnector->mst_output_port) {
					DRM_ERROR("%s Checking mst case but connector has no port assigned\n", __func__);
					return false;
				}

				if (aconnector->mst_root &&
					aconnector->mst_root->mst_mgr.mst_primary == NULL) {
					DRM_WARN("%s passed in a stale mst connector\n", __func__);
				}

				if (aconnector->mst_output_port->parent->lct == dm->secure_display_ctx.phy_id_mapping[idx].lct &&
					aconnector->mst_output_port->port_num == dm->secure_display_ctx.phy_id_mapping[idx].port_num) {
					if (aconnector->mst_output_port->parent->lct == 1) {
						found = true;
						goto out;
					} else if (aconnector->mst_output_port->parent->lct > 1) {
						/* Check that every hop in the RAD matches */
						for (idx_2 = 0; idx_2 < aconnector->mst_output_port->parent->lct - 1; idx_2++) {
							int shift = (idx_2 % 2) ? 0 : 4;
							int port_num = (aconnector->mst_output_port->parent->rad[idx_2 / 2] >> shift) & 0xf;
							int port_num2 = (dm->secure_display_ctx.phy_id_mapping[idx].rad[idx_2 / 2] >> shift) & 0xf;

							if (port_num != port_num2)
								break;
						}

						if (idx_2 == aconnector->mst_output_port->parent->lct - 1) {
							found = true;
							goto out;
						}
					} else {
						DRM_ERROR("LCT should be >= 1\n");
						return false;
					}
				}
			}
		}
	}

out:
	if (found) {
		DRM_DEBUG_DRIVER("Associated secure display PHY ID as %d\n", idx);
		*phy_id = idx;
	} else {
		DRM_WARN("Can't find associated phy ID\n");
		return false;
	}

	return true;
}

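/*
 * Reset all CRC windows (ROIs) on @crtc to their disabled defaults. If
 * secure display was active, also flush the deferred workers and ask DC to
 * stop forwarding the CRC window for this stream's PHY.
 */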
static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc, struct dc_stream_state *stream)
{
	struct drm_device *drm_dev = crtc->dev;
	struct amdgpu_display_manager *dm = &drm_to_adev(drm_dev)->dm;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_dm_connector *aconnector;
	bool was_activated;
	uint8_t phy_id;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	was_activated = acrtc->dm_irq_params.crc_window_activated;
	for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
		acrtc->dm_irq_params.window_param[i].x_start = 0;
		acrtc->dm_irq_params.window_param[i].y_start = 0;
		acrtc->dm_irq_params.window_param[i].x_end = 0;
		acrtc->dm_irq_params.window_param[i].y_end = 0;
		acrtc->dm_irq_params.window_param[i].enable = false;
		acrtc->dm_irq_params.window_param[i].update_win = false;
		acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 0;
	}
	acrtc->dm_irq_params.crc_window_activated = false;
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);

	/* Disable secure_display if it was enabled */
	if (was_activated) {
		/* Stop ROI updates on this crtc */
		flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].notify_ta_work);
		flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].forward_roi_work);
		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (aconnector && get_phy_id(dm, aconnector, &phy_id)) {
			if (dm->secure_display_ctx.support_mul_roi)
				dc_stream_forward_multiple_crc_window(stream, NULL, phy_id, true);
			else
				dc_stream_forward_crc_window(stream, NULL, phy_id, true);
		} else {
			DRM_DEBUG_DRIVER("%s Can't find matching phy id\n", __func__);
		}
	}
}

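/*
 * Deferred work: ask the PSP secure display TA to read out the ROI CRCs for
 * this CRTC's PHY and transmit them over I2C. Deferred to process context
 * because it takes mutexes and talks to the PSP.
 */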
static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
{
	struct secure_display_crtc_context *crtc_ctx;
	struct psp_context *psp;
	struct ta_securedisplay_cmd *securedisplay_cmd;
	struct drm_crtc *crtc;
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector;
	uint8_t phy_inst;
	struct amdgpu_display_manager *dm;
	int ret;

	crtc_ctx = container_of(work, struct secure_display_crtc_context, notify_ta_work);
	crtc = crtc_ctx->crtc;

	if (!crtc)
		return;

	psp = &drm_to_adev(crtc->dev)->psp;

	if (!psp->securedisplay_context.context.initialized) {
		DRM_DEBUG_DRIVER("Secure Display fails to notify PSP TA\n");
		return;
	}

	dm = &drm_to_adev(crtc->dev)->dm;
	stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
	if (!aconnector)
		return;

	mutex_lock(&crtc->dev->mode_config.mutex);
	if (!get_phy_id(dm, aconnector, &phy_inst)) {
		DRM_WARN("%s Can't find mapping phy id!\n", __func__);
		mutex_unlock(&crtc->dev->mode_config.mutex);
		return;
	}
	mutex_unlock(&crtc->dev->mode_config.mutex);

	/* Need the lock since multiple crtcs share the command buffer */
	mutex_lock(&psp->securedisplay_context.mutex);

	psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
						TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);

	securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst;

	/* The PSP TA is expected to finish data transmission over I2C within the
	 * current frame, even if up to 4 crtcs request to send in this frame.
	 */
	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);

	if (!ret) {
		if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS)
			psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
	}

	mutex_unlock(&psp->securedisplay_context.mutex);
}

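/*
 * Deferred work: snapshot the CRC windows (ROIs) under the event lock and
 * forward them to DC for CRC window programming. Deferred because taking
 * dm->dc_lock can sleep and so must not happen in the vblank interrupt.
 */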
static void
amdgpu_dm_forward_crc_window(struct work_struct *work)
{
	struct secure_display_crtc_context *crtc_ctx;
	struct amdgpu_display_manager *dm;
	struct drm_crtc *crtc;
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector;
	struct crc_window roi_cpy[MAX_CRC_WINDOW_NUM];
	unsigned long flags;
	uint8_t phy_id;

	crtc_ctx = container_of(work, struct secure_display_crtc_context, forward_roi_work);
	crtc = crtc_ctx->crtc;

	if (!crtc)
		return;

	dm = &drm_to_adev(crtc->dev)->dm;
	stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector)
		return;

	mutex_lock(&crtc->dev->mode_config.mutex);
	if (!get_phy_id(dm, aconnector, &phy_id)) {
		DRM_WARN("%s Can't find mapping phy id!\n", __func__);
		mutex_unlock(&crtc->dev->mode_config.mutex);
		return;
	}
	mutex_unlock(&crtc->dev->mode_config.mutex);

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	memcpy(roi_cpy, crtc_ctx->roi, sizeof(struct crc_window) * MAX_CRC_WINDOW_NUM);
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	mutex_lock(&dm->dc_lock);
	if (dm->secure_display_ctx.support_mul_roi)
		dc_stream_forward_multiple_crc_window(stream, roi_cpy,
			phy_id, false);
	else
		dc_stream_forward_crc_window(stream, &roi_cpy[0].rect,
			phy_id, false);
	mutex_unlock(&dm->dc_lock);
}

bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc)
{
	struct drm_device *drm_dev = crtc->dev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	bool ret = false;

	spin_lock_irq(&drm_dev->event_lock);
	ret = acrtc->dm_irq_params.crc_window_activated;
	spin_unlock_irq(&drm_dev->event_lock);

	return ret;
}
#endif

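/*
 * debugfs hook: validate the requested CRC source name and report that each
 * CRC entry carries three values (one per colour channel).
 */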
int
amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
				 size_t *values_cnt)
{
	enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);

	if (source < 0) {
		DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",
				 src_name, crtc->index);
		return -EINVAL;
	}

	*values_cnt = 3;
	return 0;
}

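/*
 * Program CRTC CRC generation for @source along with the matching dither
 * and dynamic expansion settings. Plain (non-dither) sources switch
 * dithering off via truncation, presumably so dithering noise does not
 * perturb the generated CRC values.
 */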
int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
					struct dm_crtc_state *dm_crtc_state,
					enum amdgpu_dm_pipe_crc_source source)
{
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dc_stream_state *stream_state = dm_crtc_state->stream;
	bool enable = amdgpu_dm_is_valid_crc_source(source);
	int ret = 0;

	/* Configuration will be deferred to stream enable. */
	if (!stream_state)
		return -EINVAL;

	mutex_lock(&adev->dm.dc_lock);

	/* Enable or disable CRTC CRC generation */
	if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
		if (!dc_stream_configure_crc(stream_state->ctx->dc,
					     stream_state, NULL, enable, enable)) {
			ret = -EINVAL;
			goto unlock;
		}
	}

	/* Configure dithering */
	if (!dm_need_crc_dither(source)) {
		dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8);
		dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
					    DYN_EXPANSION_DISABLE);
	} else {
		dc_stream_set_dither_option(stream_state,
					    DITHER_OPTION_DEFAULT);
		dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
					    DYN_EXPANSION_AUTO);
	}

unlock:
	mutex_unlock(&adev->dm.dc_lock);

	return ret;
}

int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
	enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);
	enum amdgpu_dm_pipe_crc_source cur_crc_src;
	struct drm_crtc_commit *commit;
	struct dm_crtc_state *crtc_state;
	struct drm_device *drm_dev = crtc->dev;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	struct amdgpu_device *adev = drm_to_adev(drm_dev);
	struct amdgpu_display_manager *dm = &adev->dm;
#endif
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct drm_dp_aux *aux = NULL;
	bool enable = false;
	bool enabled = false;
	int ret = 0;

	if (source < 0) {
		DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",
				 src_name, crtc->index);
		return -EINVAL;
	}

	ret = drm_modeset_lock(&crtc->mutex, NULL);
	if (ret)
		return ret;

	spin_lock(&crtc->commit_lock);
	commit = list_first_entry_or_null(&crtc->commit_list,
					  struct drm_crtc_commit, commit_entry);
	if (commit)
		drm_crtc_commit_get(commit);
	spin_unlock(&crtc->commit_lock);

	if (commit) {
		/*
		 * Need to wait for all outstanding programming to complete
		 * in commit tail since it can modify CRC related fields and
		 * hardware state. Since we're holding the CRTC lock we're
		 * guaranteed that no other commit work can be queued off
		 * before we modify the state below.
		 */
		ret = wait_for_completion_interruptible_timeout(
			&commit->hw_done, 10 * HZ);
		if (ret < 0)
			goto cleanup;
		if (ret == 0) {
			ret = -ETIMEDOUT;
			goto cleanup;
		}
		ret = 0;
	}

	enable = amdgpu_dm_is_valid_crc_source(source);
	crtc_state = to_dm_crtc_state(crtc->state);
	spin_lock_irq(&drm_dev->event_lock);
	cur_crc_src = acrtc->dm_irq_params.crc_src;
	spin_unlock_irq(&drm_dev->event_lock);

	/*
	 * USER REQ SRC | CURRENT SRC | BEHAVIOR
	 * -------------+-------------+---------------------------------------------------
	 * None         | None        | Do nothing
	 * None         | CRTC        | Disable CRTC CRC, set default to dither
	 * None         | DPRX        | Disable DPRX CRC, need 'aux', set default to dither
	 * None         | CRTC DITHER | Disable CRTC CRC
	 * None         | DPRX DITHER | Disable DPRX CRC, need 'aux'
	 * CRTC         | XXXX        | Enable CRTC CRC, no dither
	 * DPRX         | XXXX        | Enable DPRX CRC, need 'aux', no dither
	 * CRTC DITHER  | XXXX        | Enable CRTC CRC, set dither
	 * DPRX DITHER  | XXXX        | Enable DPRX CRC, need 'aux', set dither
	 */
	if (dm_is_crc_source_dprx(source) ||
	    (source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE &&
	     dm_is_crc_source_dprx(cur_crc_src))) {
		struct amdgpu_dm_connector *aconn = NULL;
		struct drm_connector *connector;
		struct drm_connector_list_iter conn_iter;

		drm_connector_list_iter_begin(crtc->dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (!connector->state || connector->state->crtc != crtc)
				continue;

			if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
				continue;

			aconn = to_amdgpu_dm_connector(connector);
			break;
		}
		drm_connector_list_iter_end(&conn_iter);

		if (!aconn) {
			DRM_DEBUG_DRIVER("No amd connector matching CRTC-%d\n", crtc->index);
			ret = -EINVAL;
			goto cleanup;
		}

		aux = (aconn->mst_output_port) ? &aconn->mst_output_port->aux : &aconn->dm_dp_aux.aux;

		if (!aux) {
			DRM_DEBUG_DRIVER("No dp aux for amd connector\n");
			ret = -EINVAL;
			goto cleanup;
		}

		if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
				(aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) {
			DRM_DEBUG_DRIVER("No DP connector available for CRC source\n");
			ret = -EINVAL;
			goto cleanup;
		}
	}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/* Reset secure_display when we change the crc source from debugfs */
	amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream);
#endif

	if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) {
		ret = -EINVAL;
		goto cleanup;
	}

	/*
	 * Reading the CRC requires the vblank interrupt handler to be
	 * enabled. Keep a reference until CRC capture stops.
	 */
	enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
	if (!enabled && enable) {
		ret = drm_crtc_vblank_get(crtc);
		if (ret)
			goto cleanup;

		if (dm_is_crc_source_dprx(source)) {
			if (drm_dp_start_crc(aux, crtc)) {
				DRM_DEBUG_DRIVER("dp start crc failed\n");
				ret = -EINVAL;
				goto cleanup;
			}
		}
	} else if (enabled && !enable) {
		drm_crtc_vblank_put(crtc);
		/* When disabling, the source being torn down is cur_crc_src */
		if (dm_is_crc_source_dprx(cur_crc_src)) {
			if (drm_dp_stop_crc(aux)) {
				DRM_DEBUG_DRIVER("dp stop crc failed\n");
				ret = -EINVAL;
				goto cleanup;
			}
		}
	}

	spin_lock_irq(&drm_dev->event_lock);
	acrtc->dm_irq_params.crc_src = source;
	spin_unlock_irq(&drm_dev->event_lock);

	/* Reset crc_skipped on dm state */
	crtc_state->crc_skip_count = 0;

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/* Initialize the phy id mapping table for secure display */
	if (!dm->secure_display_ctx.phy_mapping_updated)
		update_phy_id_mapping(adev);
#endif

cleanup:
	if (commit)
		drm_crtc_commit_put(commit);

	drm_modeset_unlock(&crtc->mutex);

	return ret;
}

/**
 * amdgpu_dm_crtc_handle_crc_irq - Report to DRM the CRC on the given CRTC.
 * @crtc: DRM CRTC object.
 *
 * This function should be called at the end of a vblank, when the fb has been
 * fully processed through the pipe.
 */
void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
{
	struct dm_crtc_state *crtc_state;
	struct dc_stream_state *stream_state;
	struct drm_device *drm_dev = NULL;
	enum amdgpu_dm_pipe_crc_source cur_crc_src;
	struct amdgpu_crtc *acrtc = NULL;
	uint32_t crcs[3];
	unsigned long flags;

	if (crtc == NULL)
		return;

	crtc_state = to_dm_crtc_state(crtc->state);
	stream_state = crtc_state->stream;
	acrtc = to_amdgpu_crtc(crtc);
	drm_dev = crtc->dev;

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	cur_crc_src = acrtc->dm_irq_params.crc_src;
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);

	/* Early return if CRC capture is not enabled. */
	if (!amdgpu_dm_is_valid_crc_source(cur_crc_src))
		return;

	/*
	 * Since flipping and crc enablement happen asynchronously, we - more
	 * often than not - will be returning an 'uncooked' crc on first frame.
	 * Probably because hw isn't ready yet. For added security, skip the
	 * first two CRC values.
	 */
	if (crtc_state->crc_skip_count < 2) {
		crtc_state->crc_skip_count += 1;
		return;
	}

	if (dm_is_crc_source_crtc(cur_crc_src)) {
		if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, 0,
				       &crcs[0], &crcs[1], &crcs[2]))
			return;

		drm_crtc_add_crc_entry(crtc, true,
				       drm_crtc_accurate_vblank_count(crtc), crcs);
	}
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
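/*
 * Per-vblank secure display handler: latch pending ROI updates for dmub,
 * read back the per-window CRCs, and kick the forward/notify workers. The
 * heavy lifting is deferred since this runs from the vblank interrupt.
 */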
void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
{
	struct drm_device *drm_dev = NULL;
	enum amdgpu_dm_pipe_crc_source cur_crc_src;
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_device *adev = NULL;
	struct secure_display_crtc_context *crtc_ctx = NULL;
	bool reset_crc_frame_count[MAX_CRC_WINDOW_NUM] = {false};
	uint32_t crc_r[MAX_CRC_WINDOW_NUM] = {0};
	uint32_t crc_g[MAX_CRC_WINDOW_NUM] = {0};
	uint32_t crc_b[MAX_CRC_WINDOW_NUM] = {0};
	unsigned long flags1;
	bool forward_roi_change = false;
	bool notify_ta = false;
	bool all_crc_ready = true;
	int i;

	if (crtc == NULL)
		return;

	acrtc = to_amdgpu_crtc(crtc);
	adev = drm_to_adev(crtc->dev);
	drm_dev = crtc->dev;

	spin_lock_irqsave(&drm_dev->event_lock, flags1);
	cur_crc_src = acrtc->dm_irq_params.crc_src;

	/* Early return if CRC capture is not enabled. */
	if (!amdgpu_dm_is_valid_crc_source(cur_crc_src) ||
	    !dm_is_crc_source_crtc(cur_crc_src)) {
		spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
		return;
	}

	if (!acrtc->dm_irq_params.crc_window_activated) {
		spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
		return;
	}

	crtc_ctx = &adev->dm.secure_display_ctx.crtc_ctx[acrtc->crtc_id];
	if (WARN_ON(crtc_ctx->crtc != crtc)) {
		/* We set the crtc when creating secure_display_crtc_context,
		 * so we don't expect it to change here.
		 */
		crtc_ctx->crtc = crtc;
	}

	for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
		crtc_ctx->roi[i].enable = acrtc->dm_irq_params.window_param[i].enable;

		if (!acrtc->dm_irq_params.window_param[i].enable) {
			crtc_ctx->crc_info.crc[i].crc_ready = false;
			continue;
		}

		if (acrtc->dm_irq_params.window_param[i].skip_frame_cnt) {
			acrtc->dm_irq_params.window_param[i].skip_frame_cnt -= 1;
			crtc_ctx->crc_info.crc[i].crc_ready = false;
			continue;
		}

		if (acrtc->dm_irq_params.window_param[i].update_win) {
			/* Prepare work for dmub to update the ROI */
			crtc_ctx->roi[i].rect.x = acrtc->dm_irq_params.window_param[i].x_start;
			crtc_ctx->roi[i].rect.y = acrtc->dm_irq_params.window_param[i].y_start;
			crtc_ctx->roi[i].rect.width = acrtc->dm_irq_params.window_param[i].x_end -
						acrtc->dm_irq_params.window_param[i].x_start;
			crtc_ctx->roi[i].rect.height = acrtc->dm_irq_params.window_param[i].y_end -
						acrtc->dm_irq_params.window_param[i].y_start;

			forward_roi_change = true;

			reset_crc_frame_count[i] = true;

			acrtc->dm_irq_params.window_param[i].update_win = false;

			/* Statically skip 1 frame, because we may need to wait for the
			 * following before sending the ROI to dmub:
			 * 1. We defer the work by using the system workqueue.
			 * 2. We may need to wait for dc_lock before accessing dmub.
			 */
			acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 1;
			crtc_ctx->crc_info.crc[i].crc_ready = false;
		} else {
			struct dc_stream_state *stream_state = to_dm_crtc_state(crtc->state)->stream;

			if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, i,
						&crc_r[i], &crc_g[i], &crc_b[i]))
				DRM_ERROR("Secure Display: failed to get crc from engine %d\n", i);

			/* Prepare work for psp to read the ROI/CRC and send it over I2C */
			notify_ta = true;
			/* CRC is ready for psp to read out */
			crtc_ctx->crc_info.crc[i].crc_ready = true;
		}
	}

	spin_unlock_irqrestore(&drm_dev->event_lock, flags1);

	if (forward_roi_change)
		schedule_work(&crtc_ctx->forward_roi_work);

	if (notify_ta)
		schedule_work(&crtc_ctx->notify_ta_work);

	spin_lock_irqsave(&crtc_ctx->crc_info.lock, flags1);
	for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
		crtc_ctx->crc_info.crc[i].crc_R = crc_r[i];
		crtc_ctx->crc_info.crc[i].crc_G = crc_g[i];
		crtc_ctx->crc_info.crc[i].crc_B = crc_b[i];

		if (!crtc_ctx->roi[i].enable) {
			crtc_ctx->crc_info.crc[i].frame_count = 0;
			continue;
		}

		if (!crtc_ctx->crc_info.crc[i].crc_ready)
			all_crc_ready = false;

		/* Reset the reference frame count after the user updates the ROI
		 * or when it reaches the maximum value.
		 */
		if (reset_crc_frame_count[i] || crtc_ctx->crc_info.crc[i].frame_count == UINT_MAX)
			crtc_ctx->crc_info.crc[i].frame_count = 0;
		else
			crtc_ctx->crc_info.crc[i].frame_count += 1;
	}
	spin_unlock_irqrestore(&crtc_ctx->crc_info.lock, flags1);

	if (all_crc_ready)
		complete_all(&crtc_ctx->crc_info.completion);
}

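/*
 * Allocate one secure display context per CRTC and wire up the deferred
 * ROI-forward and TA-notify workers. On allocation failure the context
 * pointer is left NULL and secure display simply stays unavailable.
 */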
void amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)
{
	struct secure_display_crtc_context *crtc_ctx = NULL;
	int i;

	crtc_ctx = kcalloc(adev->mode_info.num_crtc,
			   sizeof(struct secure_display_crtc_context),
			   GFP_KERNEL);

	if (!crtc_ctx) {
		adev->dm.secure_display_ctx.crtc_ctx = NULL;
		return;
	}

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		INIT_WORK(&crtc_ctx[i].forward_roi_work, amdgpu_dm_forward_crc_window);
		INIT_WORK(&crtc_ctx[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
		crtc_ctx[i].crtc = &adev->mode_info.crtcs[i]->base;
		spin_lock_init(&crtc_ctx[i].crc_info.lock);
	}

	adev->dm.secure_display_ctx.crtc_ctx = crtc_ctx;
}
#endif