/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "dc.h"
#include "amdgpu_securedisplay.h"
#include "amdgpu_dm_psr.h"

static const char *const pipe_crc_sources[] = {
	"none",
	"crtc",
	"crtc dither",
	"dprx",
	"dprx dither",
	"auto",
};

static enum amdgpu_dm_pipe_crc_source dm_parse_crc_source(const char *source)
{
	if (!source || !strcmp(source, "none"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_NONE;
	if (!strcmp(source, "auto") || !strcmp(source, "crtc"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC;
	if (!strcmp(source, "dprx"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX;
	if (!strcmp(source, "crtc dither"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER;
	if (!strcmp(source, "dprx dither"))
		return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER;

	return AMDGPU_DM_PIPE_CRC_SOURCE_INVALID;
}

static bool dm_is_crc_source_crtc(enum amdgpu_dm_pipe_crc_source src)
{
	return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC) ||
	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER);
}

static bool dm_is_crc_source_dprx(enum amdgpu_dm_pipe_crc_source src)
{
	return (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX) ||
	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER);
}

static bool dm_need_crc_dither(enum amdgpu_dm_pipe_crc_source src)
{
	return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER) ||
	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER) ||
	       (src == AMDGPU_DM_PIPE_CRC_SOURCE_NONE);
}

const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
						  size_t *count)
{
	*count = ARRAY_SIZE(pipe_crc_sources);
	return pipe_crc_sources;
}

#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
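/*
 * Build a stable connector-to-PHY ID mapping for secure display. Connected
 * connectors are collected, sorted by link encoder HW instance and, for MST
 * connectors sharing an encoder, by tree depth (LCT) and RAD, so the same
 * physical display always resolves to the same PHY ID.
 */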
static void update_phy_id_mapping(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *sort_connector[AMDGPU_DM_MAX_CRTC] = {NULL};
	struct drm_connector_list_iter iter;
	uint8_t idx = 0, idx_2 = 0, connector_cnt = 0;

	dm->secure_display_ctx.phy_mapping_updated = false;

	mutex_lock(&ddev->mode_config.mutex);
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {

		if (connector->status != connector_status_connected)
			continue;

		if (idx >= AMDGPU_DM_MAX_CRTC) {
			DRM_WARN("%s connected connectors exceed max crtc\n", __func__);
			mutex_unlock(&ddev->mode_config.mutex);
			return;
		}

		aconnector = to_amdgpu_dm_connector(connector);

		sort_connector[idx] = aconnector;
		idx++;
		connector_cnt++;
	}
	drm_connector_list_iter_end(&iter);

	/* Sort connectors by link_enc_hw_instance first */
	for (idx = connector_cnt; idx > 1; idx--) {
		for (idx_2 = 0; idx_2 < (idx - 1); idx_2++) {
			if (sort_connector[idx_2]->dc_link->link_enc_hw_inst >
			    sort_connector[idx_2 + 1]->dc_link->link_enc_hw_inst)
				swap(sort_connector[idx_2], sort_connector[idx_2 + 1]);
		}
	}

	/*
	 * Sort MST connectors by RAD. MST connectors with the same enc_hw_instance
	 * are already sorted together above.
	 */
	for (idx = 0; idx < connector_cnt; /* Do nothing */) {
		if (sort_connector[idx]->mst_root) {
			uint8_t i, j, k;
			uint8_t mst_con_cnt = 1;

			for (idx_2 = (idx + 1); idx_2 < connector_cnt; idx_2++) {
				if (sort_connector[idx_2]->mst_root == sort_connector[idx]->mst_root)
					mst_con_cnt++;
				else
					break;
			}

			for (i = mst_con_cnt; i > 1; i--) {
				for (j = idx; j < (idx + i - 2); j++) {
					int mstb_lct = sort_connector[j]->mst_output_port->parent->lct;
					int next_mstb_lct = sort_connector[j + 1]->mst_output_port->parent->lct;
					u8 *rad;
					u8 *next_rad;
					bool swap = false;

					/* Sort by MST tree depth first. Then compare RAD if depth is the same */
					if (mstb_lct > next_mstb_lct) {
						swap = true;
					} else if (mstb_lct == next_mstb_lct) {
						if (mstb_lct == 1) {
							if (sort_connector[j]->mst_output_port->port_num > sort_connector[j + 1]->mst_output_port->port_num)
								swap = true;
						} else if (mstb_lct > 1) {
							rad = sort_connector[j]->mst_output_port->parent->rad;
							next_rad = sort_connector[j + 1]->mst_output_port->parent->rad;

							for (k = 0; k < mstb_lct - 1; k++) {
								int shift = (k % 2) ? 0 : 4;
								int port_num = (rad[k / 2] >> shift) & 0xf;
								int next_port_num = (next_rad[k / 2] >> shift) & 0xf;

								if (port_num > next_port_num) {
									swap = true;
									break;
								}
							}
						} else {
							DRM_ERROR("MST LCT shouldn't be set as < 1");
							mutex_unlock(&ddev->mode_config.mutex);
							return;
						}
					}

					if (swap)
						swap(sort_connector[j], sort_connector[j + 1]);
				}
			}

			idx += mst_con_cnt;
		} else {
			idx++;
		}
	}

	/* Sorting complete. Assign the relevant result to dm->secure_display_ctx.phy_id_mapping[] */
	memset(dm->secure_display_ctx.phy_id_mapping, 0, sizeof(dm->secure_display_ctx.phy_id_mapping));
	for (idx = 0; idx < connector_cnt; idx++) {
		aconnector = sort_connector[idx];

		dm->secure_display_ctx.phy_id_mapping[idx].assigned = true;
		dm->secure_display_ctx.phy_id_mapping[idx].is_mst = false;
		dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst = aconnector->dc_link->link_enc_hw_inst;

		if (sort_connector[idx]->mst_root) {
			dm->secure_display_ctx.phy_id_mapping[idx].is_mst = true;
			dm->secure_display_ctx.phy_id_mapping[idx].lct = aconnector->mst_output_port->parent->lct;
			dm->secure_display_ctx.phy_id_mapping[idx].port_num = aconnector->mst_output_port->port_num;
			memcpy(dm->secure_display_ctx.phy_id_mapping[idx].rad,
				aconnector->mst_output_port->parent->rad, sizeof(aconnector->mst_output_port->parent->rad));
		}
	}
	mutex_unlock(&ddev->mode_config.mutex);

	dm->secure_display_ctx.phy_id_mapping_cnt = connector_cnt;
	dm->secure_display_ctx.phy_mapping_updated = true;
}

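/*
 * Look up the PHY ID assigned to @aconnector by update_phy_id_mapping().
 * For SST links the link encoder HW instance is enough; for MST links the
 * tree depth (LCT), port number and RAD must match as well. On success,
 * returns true and stores the ID in @phy_id.
 */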
static bool get_phy_id(struct amdgpu_display_manager *dm,
		       struct amdgpu_dm_connector *aconnector, uint8_t *phy_id)
{
	int idx, idx_2;
	bool found = false;

	/*
	 * Assume secure display starts after all connectors are probed. The
	 * connection config is static as well.
	 */
	if (!dm->secure_display_ctx.phy_mapping_updated) {
		DRM_WARN("%s Should update the phy id table before getting its value", __func__);
		return false;
	}

	for (idx = 0; idx < dm->secure_display_ctx.phy_id_mapping_cnt; idx++) {
		if (!dm->secure_display_ctx.phy_id_mapping[idx].assigned) {
			DRM_ERROR("phy_id_mapping[%d] should be assigned", idx);
			return false;
		}

		if (aconnector->dc_link->link_enc_hw_inst ==
				dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst) {
			if (!dm->secure_display_ctx.phy_id_mapping[idx].is_mst) {
				found = true;
				goto out;
			} else {
				/* Could be caused by wrongly passing an MST root connector */
				if (!aconnector->mst_output_port) {
					DRM_ERROR("%s Check mst case but connector without a port assigned", __func__);
					return false;
				}

				if (aconnector->mst_root &&
				    aconnector->mst_root->mst_mgr.mst_primary == NULL) {
					DRM_WARN("%s pass in a stale mst connector", __func__);
				}

				if (aconnector->mst_output_port->parent->lct == dm->secure_display_ctx.phy_id_mapping[idx].lct &&
				    aconnector->mst_output_port->port_num == dm->secure_display_ctx.phy_id_mapping[idx].port_num) {
					if (aconnector->mst_output_port->parent->lct == 1) {
						found = true;
						goto out;
					} else if (aconnector->mst_output_port->parent->lct > 1) {
						/* Check RAD */
						for (idx_2 = 0; idx_2 < aconnector->mst_output_port->parent->lct - 1; idx_2++) {
							int shift = (idx_2 % 2) ? 0 : 4;
							int port_num = (aconnector->mst_output_port->parent->rad[idx_2 / 2] >> shift) & 0xf;
							int port_num2 = (dm->secure_display_ctx.phy_id_mapping[idx].rad[idx_2 / 2] >> shift) & 0xf;

							if (port_num != port_num2)
								break;
						}

						if (idx_2 == aconnector->mst_output_port->parent->lct - 1) {
							found = true;
							goto out;
						}
					} else {
						DRM_ERROR("LCT should be >= 1");
						return false;
					}
				}
			}
		}
	}

out:
	if (found) {
		DRM_DEBUG_DRIVER("Associated secure display PHY ID as %d", idx);
		*phy_id = idx;
	} else {
		DRM_WARN("Can't find associated phy ID");
		return false;
	}

	return true;
}

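/*
 * Reset all CRC window parameters on @crtc to their defaults. If secure
 * display was active in legacy mode, also flush the outstanding ROI/TA work
 * and tell DMUB to stop forwarding CRC windows for the stream.
 */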
static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc, struct dc_stream_state *stream)
{
	struct drm_device *drm_dev = crtc->dev;
	struct amdgpu_display_manager *dm = &drm_to_adev(drm_dev)->dm;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_dm_connector *aconnector;
	bool was_activated;
	uint8_t phy_id;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	was_activated = acrtc->dm_irq_params.crc_window_activated;
	for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
		acrtc->dm_irq_params.window_param[i].x_start = 0;
		acrtc->dm_irq_params.window_param[i].y_start = 0;
		acrtc->dm_irq_params.window_param[i].x_end = 0;
		acrtc->dm_irq_params.window_param[i].y_end = 0;
		acrtc->dm_irq_params.window_param[i].enable = false;
		acrtc->dm_irq_params.window_param[i].update_win = false;
		acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 0;
	}
	acrtc->dm_irq_params.crc_window_activated = false;
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);

	/* Disable secure_display if it was enabled */
	if (was_activated && dm->secure_display_ctx.op_mode == LEGACY_MODE) {
		/* Stop ROI updates on this crtc */
		flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].notify_ta_work);
		flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].forward_roi_work);
		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (aconnector && get_phy_id(dm, aconnector, &phy_id)) {
			if (dm->secure_display_ctx.support_mul_roi)
				dc_stream_forward_multiple_crc_window(stream, NULL, phy_id, true);
			else
				dc_stream_forward_crc_window(stream, NULL, phy_id, true);
		} else {
			DRM_DEBUG_DRIVER("%s Can't find matching phy id", __func__);
		}
	}
}

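/*
 * Deferred work: hand the latest per-window CRC values to the PSP secure
 * display TA, which outputs them over I2C. Uses the V2 command when multiple
 * ROIs are supported, otherwise the single-ROI command.
 */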
static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
{
	struct secure_display_crtc_context *crtc_ctx;
	struct psp_context *psp;
	struct ta_securedisplay_cmd *securedisplay_cmd;
	struct drm_crtc *crtc;
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector;
	uint8_t phy_inst;
	struct amdgpu_display_manager *dm;
	struct crc_data crc_cpy[MAX_CRC_WINDOW_NUM];
	unsigned long flags;
	uint8_t roi_idx = 0;
	int ret;
	int i;

	crtc_ctx = container_of(work, struct secure_display_crtc_context, notify_ta_work);
	crtc = crtc_ctx->crtc;

	if (!crtc)
		return;

	psp = &drm_to_adev(crtc->dev)->psp;

	if (!psp->securedisplay_context.context.initialized) {
		DRM_DEBUG_DRIVER("Secure Display fails to notify PSP TA\n");
		return;
	}

	dm = &drm_to_adev(crtc->dev)->dm;
	stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
	if (!aconnector)
		return;

	mutex_lock(&crtc->dev->mode_config.mutex);
	if (!get_phy_id(dm, aconnector, &phy_inst)) {
		DRM_WARN("%s Can't find mapping phy id!", __func__);
		mutex_unlock(&crtc->dev->mode_config.mutex);
		return;
	}
	mutex_unlock(&crtc->dev->mode_config.mutex);

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	memcpy(crc_cpy, crtc_ctx->crc_info.crc, sizeof(struct crc_data) * MAX_CRC_WINDOW_NUM);
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	/* Need the lock for multiple crtcs to use the command buffer */
	mutex_lock(&psp->securedisplay_context.mutex);
	/* PSP TA is expected to finish data transmission over I2C within the
	 * current frame, even if there are up to 4 crtcs requesting to send in
	 * this frame.
	 */
	if (dm->secure_display_ctx.support_mul_roi) {
		psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
					       TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2);

		securedisplay_cmd->securedisplay_in_message.send_roi_crc_v2.phy_id = phy_inst;

		for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
			if (crc_cpy[i].crc_ready)
				roi_idx |= 1 << i;
		}
		securedisplay_cmd->securedisplay_in_message.send_roi_crc_v2.roi_idx = roi_idx;

		ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2);
	} else {
		psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
					       TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);

		securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst;

		ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
	}

	if (!ret) {
		if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS)
			psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
	}

	mutex_unlock(&psp->securedisplay_context.mutex);
}

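/*
 * Deferred work: forward the user-programmed CRC window(s) for this crtc to
 * DMUB so the firmware computes CRCs over the requested ROI(s).
 */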
static void
amdgpu_dm_forward_crc_window(struct work_struct *work)
{
	struct secure_display_crtc_context *crtc_ctx;
	struct amdgpu_display_manager *dm;
	struct drm_crtc *crtc;
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector;
	struct crc_window roi_cpy[MAX_CRC_WINDOW_NUM];
	unsigned long flags;
	uint8_t phy_id;

	crtc_ctx = container_of(work, struct secure_display_crtc_context, forward_roi_work);
	crtc = crtc_ctx->crtc;

	if (!crtc)
		return;

	dm = &drm_to_adev(crtc->dev)->dm;
	stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector)
		return;

	mutex_lock(&crtc->dev->mode_config.mutex);
	if (!get_phy_id(dm, aconnector, &phy_id)) {
		DRM_WARN("%s Can't find mapping phy id!", __func__);
		mutex_unlock(&crtc->dev->mode_config.mutex);
		return;
	}
	mutex_unlock(&crtc->dev->mode_config.mutex);

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	memcpy(roi_cpy, crtc_ctx->roi, sizeof(struct crc_window) * MAX_CRC_WINDOW_NUM);
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	mutex_lock(&dm->dc_lock);
	if (dm->secure_display_ctx.support_mul_roi)
		dc_stream_forward_multiple_crc_window(stream, roi_cpy,
						      phy_id, false);
	else
		dc_stream_forward_crc_window(stream, &roi_cpy[0].rect,
					     phy_id, false);
	mutex_unlock(&dm->dc_lock);
}

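/* Report whether any secure display CRC window is active on @crtc. */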
bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc)
{
	struct drm_device *drm_dev = crtc->dev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	bool ret = false;

	spin_lock_irq(&drm_dev->event_lock);
	ret = acrtc->dm_irq_params.crc_window_activated;
	spin_unlock_irq(&drm_dev->event_lock);

	return ret;
}
#endif

int
amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
				 size_t *values_cnt)
{
	enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);

	if (source < 0) {
		DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",
				 src_name, crtc->index);
		return -EINVAL;
	}

	*values_cnt = 3;
	return 0;
}

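/*
 * Program the hardware for the requested CRC source: wait for PSR1 to
 * disengage, enable or disable CRTC CRC generation, and pick the dithering
 * and dynamic expansion options the source requires.
 */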
int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
					struct dm_crtc_state *dm_crtc_state,
					enum amdgpu_dm_pipe_crc_source source)
{
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dc_stream_state *stream_state = dm_crtc_state->stream;
	bool enable = amdgpu_dm_is_valid_crc_source(source);
	int ret = 0;

	/* Configuration will be deferred to stream enable. */
	if (!stream_state)
		return -EINVAL;

	mutex_lock(&adev->dm.dc_lock);

	/* For PSR1, check that the panel has exited PSR */
	if (stream_state->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
		amdgpu_dm_psr_wait_disable(stream_state);

	/* Enable or disable CRTC CRC generation */
	if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
		if (!dc_stream_configure_crc(stream_state->ctx->dc,
					     stream_state, NULL, enable, enable, 0, true)) {
			ret = -EINVAL;
			goto unlock;
		}
	}

	/* Configure dithering */
	if (!dm_need_crc_dither(source)) {
		dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8);
		dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
					    DYN_EXPANSION_DISABLE);
	} else {
		dc_stream_set_dither_option(stream_state,
					    DITHER_OPTION_DEFAULT);
		dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
					    DYN_EXPANSION_AUTO);
	}

unlock:
	mutex_unlock(&adev->dm.dc_lock);

	return ret;
}

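/*
 * debugfs entry point for selecting a CRC source. Waits for any pending
 * commit, takes or drops a vblank reference as capture starts or stops,
 * starts or stops DPRX CRC over the DP aux channel when needed, and resets
 * the secure display window state whenever the source changes.
 */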
int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
	enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);
	enum amdgpu_dm_pipe_crc_source cur_crc_src;
	struct drm_crtc_commit *commit;
	struct dm_crtc_state *crtc_state;
	struct drm_device *drm_dev = crtc->dev;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	struct amdgpu_device *adev = drm_to_adev(drm_dev);
	struct amdgpu_display_manager *dm = &adev->dm;
#endif
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct drm_dp_aux *aux = NULL;
	bool enable = false;
	bool enabled = false;
	int ret = 0;

	if (source < 0) {
		DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",
				 src_name, crtc->index);
		return -EINVAL;
	}

	ret = drm_modeset_lock(&crtc->mutex, NULL);
	if (ret)
		return ret;

	spin_lock(&crtc->commit_lock);
	commit = list_first_entry_or_null(&crtc->commit_list,
					  struct drm_crtc_commit, commit_entry);
	if (commit)
		drm_crtc_commit_get(commit);
	spin_unlock(&crtc->commit_lock);

	if (commit) {
		/*
		 * Need to wait for all outstanding programming to complete
		 * in commit tail since it can modify CRC related fields and
		 * hardware state. Since we're holding the CRTC lock we're
		 * guaranteed that no other commit work can be queued off
		 * before we modify the state below.
		 */
		ret = wait_for_completion_interruptible_timeout(
			&commit->hw_done, 10 * HZ);
		if (ret)
			goto cleanup;
	}

	enable = amdgpu_dm_is_valid_crc_source(source);
	crtc_state = to_dm_crtc_state(crtc->state);
	spin_lock_irq(&drm_dev->event_lock);
	cur_crc_src = acrtc->dm_irq_params.crc_src;
	spin_unlock_irq(&drm_dev->event_lock);

	/*
	 * USER REQ SRC | CURRENT SRC | BEHAVIOR
	 * -----------------------------
	 * None         | None        | Do nothing
	 * None         | CRTC        | Disable CRTC CRC, set default to dither
	 * None         | DPRX        | Disable DPRX CRC, need 'aux', set default to dither
	 * None         | CRTC DITHER | Disable CRTC CRC
	 * None         | DPRX DITHER | Disable DPRX CRC, need 'aux'
	 * CRTC         | XXXX        | Enable CRTC CRC, no dither
	 * DPRX         | XXXX        | Enable DPRX CRC, need 'aux', no dither
	 * CRTC DITHER  | XXXX        | Enable CRTC CRC, set dither
	 * DPRX DITHER  | XXXX        | Enable DPRX CRC, need 'aux', set dither
	 */
	if (dm_is_crc_source_dprx(source) ||
	    (source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE &&
	     dm_is_crc_source_dprx(cur_crc_src))) {
		struct amdgpu_dm_connector *aconn = NULL;
		struct drm_connector *connector;
		struct drm_connector_list_iter conn_iter;

		drm_connector_list_iter_begin(crtc->dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (!connector->state || connector->state->crtc != crtc)
				continue;

			if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
				continue;

			aconn = to_amdgpu_dm_connector(connector);
			break;
		}
		drm_connector_list_iter_end(&conn_iter);

		if (!aconn) {
			DRM_DEBUG_DRIVER("No amd connector matching CRTC-%d\n", crtc->index);
			ret = -EINVAL;
			goto cleanup;
		}

		aux = (aconn->mst_output_port) ? &aconn->mst_output_port->aux : &aconn->dm_dp_aux.aux;

		if (!aux) {
			DRM_DEBUG_DRIVER("No dp aux for amd connector\n");
			ret = -EINVAL;
			goto cleanup;
		}

		if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
		    (aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) {
			DRM_DEBUG_DRIVER("No DP connector available for CRC source\n");
			ret = -EINVAL;
			goto cleanup;
		}

	}

	/*
	 * Reading the CRC requires the vblank interrupt handler to be
	 * enabled. Keep a reference until CRC capture stops.
	 */
	enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
	if (!enabled && enable) {
		ret = drm_crtc_vblank_get(crtc);
		if (ret)
			goto cleanup;
	}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/* Reset secure_display when we change crc source from debugfs */
	amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream);
#endif

	if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) {
		ret = -EINVAL;
		goto cleanup;
	}

	if (!enabled && enable) {
		if (dm_is_crc_source_dprx(source)) {
			if (drm_dp_start_crc(aux, crtc)) {
				DRM_DEBUG_DRIVER("dp start crc failed\n");
				ret = -EINVAL;
				goto cleanup;
			}
		}
	} else if (enabled && !enable) {
		drm_crtc_vblank_put(crtc);
		if (dm_is_crc_source_dprx(source)) {
			if (drm_dp_stop_crc(aux)) {
				DRM_DEBUG_DRIVER("dp stop crc failed\n");
				ret = -EINVAL;
				goto cleanup;
			}
		}
	}

	spin_lock_irq(&drm_dev->event_lock);
	acrtc->dm_irq_params.crc_src = source;
	spin_unlock_irq(&drm_dev->event_lock);

	/* Reset crc_skipped on dm state */
	crtc_state->crc_skip_count = 0;

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/* Initialize phy id mapping table for secure display */
	if (dm->secure_display_ctx.op_mode == LEGACY_MODE &&
	    !dm->secure_display_ctx.phy_mapping_updated)
		update_phy_id_mapping(adev);
#endif

cleanup:
	if (commit)
		drm_crtc_commit_put(commit);

	drm_modeset_unlock(&crtc->mutex);

	return ret;
}

/**
 * amdgpu_dm_crtc_handle_crc_irq: Report to DRM the CRC on given CRTC.
 * @crtc: DRM CRTC object.
 *
 * This function should be called at the end of a vblank, when the fb has been
 * fully processed through the pipe.
 */
void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
{
	struct dm_crtc_state *crtc_state;
	struct dc_stream_state *stream_state;
	struct drm_device *drm_dev = NULL;
	enum amdgpu_dm_pipe_crc_source cur_crc_src;
	struct amdgpu_crtc *acrtc = NULL;
	uint32_t crcs[3];
	unsigned long flags;

	if (crtc == NULL)
		return;

	crtc_state = to_dm_crtc_state(crtc->state);
	stream_state = crtc_state->stream;
	acrtc = to_amdgpu_crtc(crtc);
	drm_dev = crtc->dev;

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	cur_crc_src = acrtc->dm_irq_params.crc_src;
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);

	/* Early return if CRC capture is not enabled. */
	if (!amdgpu_dm_is_valid_crc_source(cur_crc_src))
		return;

	/*
	 * Since flipping and crc enablement happen asynchronously, we - more
	 * often than not - will be returning an 'uncooked' crc on first frame.
	 * Probably because hw isn't ready yet. For added security, skip the
	 * first two CRC values.
	 */
	if (crtc_state->crc_skip_count < 2) {
		crtc_state->crc_skip_count += 1;
		return;
	}

	if (dm_is_crc_source_crtc(cur_crc_src)) {
		if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, 0,
				       &crcs[0], &crcs[1], &crcs[2]))
			return;

		drm_crtc_add_crc_entry(crtc, true,
				       drm_crtc_accurate_vblank_count(crtc), crcs);
	}
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
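/*
 * Per-vblank secure display handler: reprograms CRC windows the user has
 * updated, reads back per-window CRC values, and schedules the deferred work
 * that forwards ROIs to DMUB (legacy mode) or notifies the PSP TA.
 */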
void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
{
	struct drm_device *drm_dev = NULL;
	enum amdgpu_dm_pipe_crc_source cur_crc_src;
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_device *adev = NULL;
	struct secure_display_crtc_context *crtc_ctx = NULL;
	bool reset_crc_frame_count[MAX_CRC_WINDOW_NUM] = {false};
	uint32_t crc_r[MAX_CRC_WINDOW_NUM] = {0};
	uint32_t crc_g[MAX_CRC_WINDOW_NUM] = {0};
	uint32_t crc_b[MAX_CRC_WINDOW_NUM] = {0};
	unsigned long flags1;
	bool forward_roi_change = false;
	bool notify_ta = false;
	bool all_crc_ready = true;
	struct dc_stream_state *stream_state;
	int i;

	if (crtc == NULL)
		return;

	acrtc = to_amdgpu_crtc(crtc);
	adev = drm_to_adev(crtc->dev);
	drm_dev = crtc->dev;
	stream_state = to_dm_crtc_state(crtc->state)->stream;

	spin_lock_irqsave(&drm_dev->event_lock, flags1);
	cur_crc_src = acrtc->dm_irq_params.crc_src;

	/* Early return if CRC capture is not enabled. */
	if (!amdgpu_dm_is_valid_crc_source(cur_crc_src) ||
	    !dm_is_crc_source_crtc(cur_crc_src)) {
		spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
		return;
	}

	if (!acrtc->dm_irq_params.crc_window_activated) {
		spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
		return;
	}

	crtc_ctx = &adev->dm.secure_display_ctx.crtc_ctx[acrtc->crtc_id];
	if (WARN_ON(crtc_ctx->crtc != crtc)) {
		/* We have set the crtc when creating secure_display_crtc_context,
		 * don't expect it to be changed here.
		 */
		crtc_ctx->crtc = crtc;
	}

	for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
		struct crc_params crc_window = {
			.windowa_x_start = acrtc->dm_irq_params.window_param[i].x_start,
			.windowa_y_start = acrtc->dm_irq_params.window_param[i].y_start,
			.windowa_x_end = acrtc->dm_irq_params.window_param[i].x_end,
			.windowa_y_end = acrtc->dm_irq_params.window_param[i].y_end,
			.windowb_x_start = acrtc->dm_irq_params.window_param[i].x_start,
			.windowb_y_start = acrtc->dm_irq_params.window_param[i].y_start,
			.windowb_x_end = acrtc->dm_irq_params.window_param[i].x_end,
			.windowb_y_end = acrtc->dm_irq_params.window_param[i].y_end,
		};

		crtc_ctx->roi[i].enable = acrtc->dm_irq_params.window_param[i].enable;

		if (!acrtc->dm_irq_params.window_param[i].enable) {
			crtc_ctx->crc_info.crc[i].crc_ready = false;
			continue;
		}

		if (acrtc->dm_irq_params.window_param[i].skip_frame_cnt) {
			acrtc->dm_irq_params.window_param[i].skip_frame_cnt -= 1;
			crtc_ctx->crc_info.crc[i].crc_ready = false;
			continue;
		}

		if (acrtc->dm_irq_params.window_param[i].update_win) {
			crtc_ctx->roi[i].rect.x = crc_window.windowa_x_start;
			crtc_ctx->roi[i].rect.y = crc_window.windowa_y_start;
			crtc_ctx->roi[i].rect.width = crc_window.windowa_x_end -
				crc_window.windowa_x_start;
			crtc_ctx->roi[i].rect.height = crc_window.windowa_y_end -
				crc_window.windowa_y_start;

			if (adev->dm.secure_display_ctx.op_mode == LEGACY_MODE)
				/* Forward the task to dmub to update the ROI */
				forward_roi_change = true;
			else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE)
				/* Update the ROI via dm */
				dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
							&crc_window, true, true, i, false);

			reset_crc_frame_count[i] = true;

			acrtc->dm_irq_params.window_param[i].update_win = false;

			/* Statically skip 1 frame, because we may need to wait for the
			 * following things before sending the ROI to dmub:
			 * 1. We defer the work by using the system workqueue.
			 * 2. We may need to wait for dc_lock before accessing dmub.
			 */
			acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 1;
			crtc_ctx->crc_info.crc[i].crc_ready = false;
		} else {
			if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, i,
					       &crc_r[i], &crc_g[i], &crc_b[i]))
				DRM_ERROR("Secure Display: fail to get crc from engine %d\n", i);

			if (adev->dm.secure_display_ctx.op_mode == LEGACY_MODE)
				/* Forward the task to psp to read the ROI/CRC and output via I2C */
				notify_ta = true;
			else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE)
				/* Avoid the ROI window getting changed; keep overwriting. */
				dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
							&crc_window, true, true, i, false);

			/* crc is ready for psp to read out */
			crtc_ctx->crc_info.crc[i].crc_ready = true;
		}
	}

	spin_unlock_irqrestore(&drm_dev->event_lock, flags1);

	if (forward_roi_change)
		schedule_work(&crtc_ctx->forward_roi_work);

	if (notify_ta)
		schedule_work(&crtc_ctx->notify_ta_work);

	spin_lock_irqsave(&crtc_ctx->crc_info.lock, flags1);
	for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
		crtc_ctx->crc_info.crc[i].crc_R = crc_r[i];
		crtc_ctx->crc_info.crc[i].crc_G = crc_g[i];
		crtc_ctx->crc_info.crc[i].crc_B = crc_b[i];

		if (!crtc_ctx->roi[i].enable) {
			crtc_ctx->crc_info.crc[i].frame_count = 0;
			continue;
		}

		if (!crtc_ctx->crc_info.crc[i].crc_ready)
			all_crc_ready = false;

		if (reset_crc_frame_count[i] || crtc_ctx->crc_info.crc[i].frame_count == UINT_MAX)
			/* Reset the reference frame count after the user updates the ROI
			 * or it reaches the maximum value.
			 */
			crtc_ctx->crc_info.crc[i].frame_count = 0;
		else
			crtc_ctx->crc_info.crc[i].frame_count += 1;
	}
	spin_unlock_irqrestore(&crtc_ctx->crc_info.lock, flags1);

	if (all_crc_ready)
		complete_all(&crtc_ctx->crc_info.completion);
}

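/*
 * Allocate and initialize one secure display context per crtc, including the
 * deferred work items used to forward ROIs and to notify the PSP TA.
 */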
void amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)
{
	struct secure_display_crtc_context *crtc_ctx = NULL;
	int i;

	crtc_ctx = kcalloc(adev->mode_info.num_crtc,
			   sizeof(struct secure_display_crtc_context),
			   GFP_KERNEL);

	if (!crtc_ctx) {
		adev->dm.secure_display_ctx.crtc_ctx = NULL;
		return;
	}

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		INIT_WORK(&crtc_ctx[i].forward_roi_work, amdgpu_dm_forward_crc_window);
		INIT_WORK(&crtc_ctx[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
		crtc_ctx[i].crtc = &adev->mode_info.crtcs[i]->base;
		spin_lock_init(&crtc_ctx[i].crc_info.lock);
	}

	adev->dm.secure_display_ctx.crtc_ctx = crtc_ctx;

	adev->dm.secure_display_ctx.op_mode = DISPLAY_CRC_MODE;
}
#endif