/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/string.h>
#include <linux/acpi.h>
#include <linux/i2c.h>

#include <drm/drm_atomic.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>

#include "dm_services.h"
#include "amdgpu.h"
#include "dc.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_dm_mst_types.h"
#include "dpcd_defs.h"
#include "dc/inc/core_types.h"

#include "dm_helpers.h"
#include "ddc_service_types.h"

static u32 edid_extract_panel_id(struct edid *edid)
{
	return (u32)edid->mfg_id[0] << 24 |
	       (u32)edid->mfg_id[1] << 16 |
	       (u32)EDID_PRODUCT_ID(edid);
}
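
/*
 * Panel IDs below are built with drm_edid_encode_panel_id() (drm_edid.h),
 * which packs the three-letter PNP vendor ID (5 bits per letter, 'A' = 1)
 * into bits 31:16 and the 16-bit EDID product code into bits 15:0, i.e. the
 * same layout edid_extract_panel_id() produces from the raw EDID bytes
 * above. For example, ('S', 'A', 'M', 0x0E5E) encodes to 0x4C2D0E5E.
 */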
static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
{
	uint32_t panel_id = edid_extract_panel_id(edid);

	switch (panel_id) {
	/* Workaround for some monitors that do not work well with FAMS */
	case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E):
	case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053):
	case drm_edid_encode_panel_id('S', 'A', 'M', 0x71AC):
		DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id);
		edid_caps->panel_patch.disable_fams = true;
		break;
	/* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */
	case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB):
	case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B):
	case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A):
	case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1):
	case drm_edid_encode_panel_id('M', 'S', 'F', 0x1003):
		DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
		edid_caps->panel_patch.remove_sink_ext_caps = true;
		break;
	case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
		DRM_DEBUG_DRIVER("Disabling VSC on monitor with panel id %X\n", panel_id);
		edid_caps->panel_patch.disable_colorimetry = true;
		break;
	default:
		return;
	}
}

/**
 * dm_helpers_parse_edid_caps() - Parse edid caps
 *
 * @link: current detected link
 * @edid: [in] pointer to edid
 * @edid_caps: [out] pointer to edid caps filled in from @edid
 *
 * Return: The parsing result, EDID_OK on success.
 */
enum dc_edid_status dm_helpers_parse_edid_caps(
		struct dc_link *link,
		const struct dc_edid *edid,
		struct dc_edid_caps *edid_caps)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct drm_connector *connector = &aconnector->base;
	struct edid *edid_buf = edid ? (struct edid *) edid->raw_edid : NULL;
	struct cea_sad *sads;
	int sad_count = -1;
	int sadb_count = -1;
	int i = 0;
	uint8_t *sadb = NULL;

	enum dc_edid_status result = EDID_OK;

	if (!edid_caps || !edid)
		return EDID_BAD_INPUT;

	if (!drm_edid_is_valid(edid_buf))
		result = EDID_BAD_CHECKSUM;

	edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
			((uint16_t) edid_buf->mfg_id[1])<<8;
	edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
			((uint16_t) edid_buf->prod_code[1])<<8;
	edid_caps->serial_number = edid_buf->serial;
	edid_caps->manufacture_week = edid_buf->mfg_week;
	edid_caps->manufacture_year = edid_buf->mfg_year;

	drm_edid_get_monitor_name(edid_buf,
				  edid_caps->display_name,
				  AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	edid_caps->edid_hdmi = connector->display_info.is_hdmi;

	apply_edid_quirks(edid_buf, edid_caps);

	sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
	if (sad_count <= 0)
		return result;

	edid_caps->audio_mode_count = min(sad_count, DC_MAX_AUDIO_DESC_COUNT);
	for (i = 0; i < edid_caps->audio_mode_count; ++i) {
		struct cea_sad *sad = &sads[i];

		edid_caps->audio_modes[i].format_code = sad->format;
		edid_caps->audio_modes[i].channel_count = sad->channels + 1;
		edid_caps->audio_modes[i].sample_rate = sad->freq;
		edid_caps->audio_modes[i].sample_size = sad->byte2;
	}

	sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);

	if (sadb_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
		sadb_count = 0;
	}

	if (sadb_count)
		edid_caps->speaker_flags = sadb[0];
	else
		edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;

	kfree(sads);
	kfree(sadb);

	return result;
}
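
/*
 * Translate the DRM MST payload state into DC's stream allocation table:
 * on enable, append the target payload to a copy of the current HW table;
 * on disable, zero out the entry with the matching VCP ID. The copy is
 * then compacted into the output table so DC only sees active streams.
 */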
static void
fill_dc_mst_payload_table_from_drm(struct dc_link *link,
				   bool enable,
				   struct drm_dp_mst_atomic_payload *target_payload,
				   struct dc_dp_mst_stream_allocation_table *table)
{
	struct dc_dp_mst_stream_allocation_table new_table = { 0 };
	struct dc_dp_mst_stream_allocation *sa;
	struct link_mst_stream_allocation_table copy_of_link_table =
			link->mst_stream_alloc_table;

	int i;
	int current_hw_table_stream_cnt = copy_of_link_table.stream_count;
	struct link_mst_stream_allocation *dc_alloc;

	/* TODO: refactor to set link->mst_stream_alloc_table directly if possible. */
	if (enable) {
		dc_alloc =
		&copy_of_link_table.stream_allocations[current_hw_table_stream_cnt];
		dc_alloc->vcp_id = target_payload->vcpi;
		dc_alloc->slot_count = target_payload->time_slots;
	} else {
		for (i = 0; i < copy_of_link_table.stream_count; i++) {
			dc_alloc =
			&copy_of_link_table.stream_allocations[i];

			if (dc_alloc->vcp_id == target_payload->vcpi) {
				dc_alloc->vcp_id = 0;
				dc_alloc->slot_count = 0;
				break;
			}
		}
		ASSERT(i != copy_of_link_table.stream_count);
	}

	/* Fill payload info */
	for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
		dc_alloc =
		&copy_of_link_table.stream_allocations[i];
		if (dc_alloc->vcp_id > 0 && dc_alloc->slot_count > 0) {
			sa = &new_table.stream_allocations[new_table.stream_count];
			sa->slot_count = dc_alloc->slot_count;
			sa->vcp_id = dc_alloc->vcp_id;
			new_table.stream_count++;
		}
	}

	/* Overwrite the old table */
	*table = new_table;
}

void dm_helpers_dp_update_branch_info(
	struct dc_context *ctx,
	const struct dc_link *link)
{}
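
/*
 * Reconstruct the payload that was actually programmed for the stream:
 * scan the topology state for the next-highest VC start slot (falling back
 * to the manager's next free slot) to recover how many time slots this
 * stream occupied, then derive its PBN from the per-slot PBN value.
 */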
static void dm_helpers_construct_old_payload(
			struct drm_dp_mst_topology_mgr *mgr,
			struct drm_dp_mst_topology_state *mst_state,
			struct drm_dp_mst_atomic_payload *new_payload,
			struct drm_dp_mst_atomic_payload *old_payload)
{
	struct drm_dp_mst_atomic_payload *pos;
	int pbn_per_slot = dfixed_trunc(mst_state->pbn_div);
	u8 next_payload_vc_start = mgr->next_start_slot;
	u8 payload_vc_start = new_payload->vc_start_slot;
	u8 allocated_time_slots;

	*old_payload = *new_payload;

	/* Set correct time_slots/PBN of old payload.
	 * The other fields (delete & dsc_enabled) in
	 * struct drm_dp_mst_atomic_payload are don't-care fields
	 * while calling drm_dp_remove_payload_part2().
	 */
	list_for_each_entry(pos, &mst_state->payloads, next) {
		if (pos != new_payload &&
		    pos->vc_start_slot > payload_vc_start &&
		    pos->vc_start_slot < next_payload_vc_start)
			next_payload_vc_start = pos->vc_start_slot;
	}

	allocated_time_slots = next_payload_vc_start - payload_vc_start;

	old_payload->time_slots = allocated_time_slots;
	old_payload->pbn = allocated_time_slots * pbn_per_slot;
}

/*
 * Writes the payload allocation table in the immediate downstream device.
 */
bool dm_helpers_dp_mst_write_payload_allocation_table(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		struct dc_dp_mst_stream_allocation_table *proposed_table,
		bool enable)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_state *mst_state;
	struct drm_dp_mst_atomic_payload *target_payload, *new_payload, old_payload;
	struct drm_dp_mst_topology_mgr *mst_mgr;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
	/* Accessing the connector state is required for vcpi_slots allocation
	 * and directly relies on behaviour in commit check
	 * that blocks before commit, guaranteeing that the state
	 * is not going to be swapped while still in use in commit tail.
	 */

	if (!aconnector || !aconnector->mst_root)
		return false;

	mst_mgr = &aconnector->mst_root->mst_mgr;
	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
	new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);

	if (enable) {
		target_payload = new_payload;

		/* It's OK for this to fail */
		drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload);
	} else {
		/* construct old payload by VCPI */
		dm_helpers_construct_old_payload(mst_mgr, mst_state,
						 new_payload, &old_payload);
		target_payload = &old_payload;

		drm_dp_remove_payload_part1(mst_mgr, mst_state, new_payload);
	}

	/* mst_mgr->payloads holds the VC payloads communicated to the MST
	 * branch via DPCD or AUX messages. Slots 1-63 are allocated
	 * sequentially per stream, and AMD ASIC stream slot allocation
	 * must follow the same sequence, so copy the DRM MST allocation
	 * over to DC.
	 */
	fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table);

	return true;
}

/*
 * Poll pending down replies.
 */
void dm_helpers_dp_mst_poll_pending_down_reply(
	struct dc_context *ctx,
	const struct dc_link *link)
{}

/*
 * Clear the payload allocation table before enabling the MST DP link.
 */
void dm_helpers_dp_mst_clear_payload_allocation_table(
	struct dc_context *ctx,
	const struct dc_link *link)
{}

/*
 * Polls for ACT (allocation change trigger) handled and sends
 * ALLOCATE_PAYLOAD message.
 */
enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
		struct dc_context *ctx,
		const struct dc_stream_state *stream)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	int ret;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector || !aconnector->mst_root)
		return ACT_FAILED;

	mst_mgr = &aconnector->mst_root->mst_mgr;

	if (!mst_mgr->mst_state)
		return ACT_FAILED;

	ret = drm_dp_check_act_status(mst_mgr);

	if (ret)
		return ACT_FAILED;

	return ACT_SUCCESS;
}

void dm_helpers_dp_mst_send_payload_allocation(
		struct dc_context *ctx,
		const struct dc_stream_state *stream)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_state *mst_state;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_atomic_payload *new_payload;
	enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD;
	enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
	int ret = 0;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector || !aconnector->mst_root)
		return;

	mst_mgr = &aconnector->mst_root->mst_mgr;
	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
	new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);

	ret = drm_dp_add_payload_part2(mst_mgr, new_payload);

	if (ret) {
		amdgpu_dm_set_mst_status(&aconnector->mst_status,
					 set_flag, false);
	} else {
		amdgpu_dm_set_mst_status(&aconnector->mst_status,
					 set_flag, true);
		amdgpu_dm_set_mst_status(&aconnector->mst_status,
					 clr_flag, false);
	}
}
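
/*
 * Finish deallocation of an MST payload. drm_dp_remove_payload_part2()
 * needs the time-slot/PBN numbers as they were programmed, which the new
 * atomic state no longer carries, so reconstruct the old payload first.
 */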
void dm_helpers_dp_mst_update_mst_mgr_for_deallocation(
		struct dc_context *ctx,
		const struct dc_stream_state *stream)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_state *mst_state;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_atomic_payload *new_payload, old_payload;
	enum mst_progress_status set_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
	enum mst_progress_status clr_flag = MST_ALLOCATE_NEW_PAYLOAD;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector || !aconnector->mst_root)
		return;

	mst_mgr = &aconnector->mst_root->mst_mgr;
	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
	new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
	dm_helpers_construct_old_payload(mst_mgr, mst_state,
					 new_payload, &old_payload);

	drm_dp_remove_payload_part2(mst_mgr, mst_state, &old_payload, new_payload);

	amdgpu_dm_set_mst_status(&aconnector->mst_status, set_flag, true);
	amdgpu_dm_set_mst_status(&aconnector->mst_status, clr_flag, false);
}

void dm_dtn_log_begin(struct dc_context *ctx,
		      struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn begin]\n";

	if (!log_ctx) {
		pr_info("%s", msg);
		return;
	}

	dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
}

__printf(3, 4)
void dm_dtn_log_append_v(struct dc_context *ctx,
			 struct dc_log_buffer_ctx *log_ctx,
			 const char *msg, ...)
{
	va_list args;
	size_t total;
	int n;

	if (!log_ctx) {
		/* No context, redirect to dmesg. */
		struct va_format vaf;

		vaf.fmt = msg;
		vaf.va = &args;

		va_start(args, msg);
		pr_info("%pV", &vaf);
		va_end(args);

		return;
	}

	/* Measure the output. */
	va_start(args, msg);
	n = vsnprintf(NULL, 0, msg, args);
	va_end(args);

	if (n <= 0)
		return;

	/* Reallocate the string buffer as needed. */
	total = log_ctx->pos + n + 1;

	if (total > log_ctx->size) {
		char *buf = kvcalloc(total, sizeof(char), GFP_KERNEL);

		if (buf) {
			memcpy(buf, log_ctx->buf, log_ctx->pos);
			kfree(log_ctx->buf);

			log_ctx->buf = buf;
			log_ctx->size = total;
		}
	}

	if (!log_ctx->buf)
		return;

	/* Write the formatted string to the log buffer. */
	va_start(args, msg);
	n = vscnprintf(
		log_ctx->buf + log_ctx->pos,
		log_ctx->size - log_ctx->pos,
		msg,
		args);
	va_end(args);

	if (n > 0)
		log_ctx->pos += n;
}

void dm_dtn_log_end(struct dc_context *ctx,
		    struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn end]\n";

	if (!log_ctx) {
		pr_info("%s", msg);
		return;
	}

	dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
}

bool dm_helpers_dp_mst_start_top_mgr(
		struct dc_context *ctx,
		const struct dc_link *link,
		bool boot)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	int ret;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	if (boot) {
		DRM_INFO("DM_MST: Deferring MST start on aconnector: %p [id: %d]\n",
			 aconnector, aconnector->base.base.id);
		return true;
	}

	DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
		 aconnector, aconnector->base.base.id);

	ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
	if (ret < 0) {
		DRM_ERROR("DM_MST: Failed to set the device into MST mode!");
		return false;
	}

	DRM_INFO("DM_MST: DP%x, %d-lane link detected\n", aconnector->mst_mgr.dpcd[0],
		 aconnector->mst_mgr.dpcd[2] & DP_MAX_LANE_COUNT_MASK);

	return true;
}

bool dm_helpers_dp_mst_stop_top_mgr(
		struct dc_context *ctx,
		struct dc_link *link)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
		 aconnector, aconnector->base.base.id);

	if (aconnector->mst_mgr.mst_state) {
		drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
		link->cur_link_settings.lane_count = 0;
	}

	return false;
}
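
/*
 * Thin wrappers around the connector's AUX channel. Note the asymmetric
 * success criteria: reads succeed only when the full requested size was
 * transferred, while writes succeed on any positive transfer count.
 */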
bool dm_helpers_dp_read_dpcd(
		struct dc_context *ctx,
		const struct dc_link *link,
		uint32_t address,
		uint8_t *data,
		uint32_t size)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector)
		return false;

	return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address, data,
				size) == size;
}

bool dm_helpers_dp_write_dpcd(
		struct dc_context *ctx,
		const struct dc_link *link,
		uint32_t address,
		const uint8_t *data,
		uint32_t size)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector)
		return false;

	return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
				 address, (uint8_t *)data, size) > 0;
}

bool dm_helpers_submit_i2c(
		struct dc_context *ctx,
		const struct dc_link *link,
		struct i2c_command *cmd)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct i2c_msg *msgs;
	int i = 0;
	int num = cmd->number_of_payloads;
	bool result;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL);

	if (!msgs)
		return false;

	for (i = 0; i < num; i++) {
		msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD;
		msgs[i].addr = cmd->payloads[i].address;
		msgs[i].len = cmd->payloads[i].length;
		msgs[i].buf = cmd->payloads[i].data;
	}

	result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;

	kfree(msgs);

	return result;
}

static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
		bool is_write_cmd,
		unsigned char cmd,
		unsigned int length,
		unsigned int offset,
		unsigned char *data)
{
	bool success = false;
	unsigned char rc_data[16] = {0};
	unsigned char rc_offset[4] = {0};
	unsigned char rc_length[2] = {0};
	unsigned char rc_cmd = 0;
	unsigned char rc_result = 0xFF;
	unsigned char i = 0;
	int ret;

	if (is_write_cmd) {
		// write rc data
		memmove(rc_data, data, length);
		ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data));
	}

	// write rc offset
	rc_offset[0] = (unsigned char) offset & 0xFF;
	rc_offset[1] = (unsigned char) (offset >> 8) & 0xFF;
	rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF;
	rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF;
	ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset));

	// write rc length
	rc_length[0] = (unsigned char) length & 0xFF;
	rc_length[1] = (unsigned char) (length >> 8) & 0xFF;
	ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length));

	// write rc cmd
	rc_cmd = cmd | 0x80;
	ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));

	if (ret < 0) {
		DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret);
		return false;
	}

	// poll until active is 0
	for (i = 0; i < 10; i++) {
		drm_dp_dpcd_read(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));
		if (rc_cmd == cmd)
			// active is 0
			break;
		msleep(10);
	}

	// read rc result
	drm_dp_dpcd_read(aux, SYNAPTICS_RC_RESULT, &rc_result, sizeof(rc_result));
	success = (rc_result == 0);

	if (success && !is_write_cmd) {
		// read rc data
		drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length);
	}

	drm_dbg_dp(aux->drm_dev, "success = %d\n", success);

	return success;
}
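
/*
 * Reset the Synaptics SDP FIFO through the vendor remote-command interface
 * above: unlock with the "PRIUS" payload, read-modify-write bit 1 of three
 * branch-internal registers (0x220998/0x220D98/0x221198), clearing then
 * setting it, and finish with command 0x02. The step numbers presumably
 * follow the vendor's documented sequence.
 */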
static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux)
{
	unsigned char data[16] = {0};

	drm_dbg_dp(aux->drm_dev, "Start\n");

	// Step 2
	data[0] = 'P';
	data[1] = 'R';
	data[2] = 'I';
	data[3] = 'U';
	data[4] = 'S';

	if (!execute_synaptics_rc_command(aux, true, 0x01, 5, 0, data))
		return;

	// Step 3 and 4
	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data))
		return;

	data[0] &= (~(1 << 1)); // set bit 1 to 0
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data))
		return;

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data))
		return;

	data[0] &= (~(1 << 1)); // set bit 1 to 0
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220D98, data))
		return;

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data))
		return;

	data[0] &= (~(1 << 1)); // set bit 1 to 0
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data))
		return;

	// Step 3 and 5
	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data))
		return;

	data[0] |= (1 << 1); // set bit 1 to 1
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data))
		return;

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data))
		return;

	data[0] |= (1 << 1); // set bit 1 to 1

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data))
		return;

	data[0] |= (1 << 1); // set bit 1 to 1
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data))
		return;

	// Step 6
	if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL))
		return;

	drm_dbg_dp(aux->drm_dev, "Done\n");
}

/* MST Dock */
static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA";

static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst(
		struct drm_dp_aux *aux,
		const struct dc_stream_state *stream,
		bool enable)
{
	uint8_t ret = 0;

	drm_dbg_dp(aux->drm_dev,
		   "MST_DSC Configure DSC to non-virtual dpcd synaptics\n");

	if (enable) {
		/* If DSC was enabled on a previous boot and the system is
		 * rebooted with the hub attached, there is a chance that the
		 * Synaptics hub gets stuck during the reboot sequence.
		 * Apply a workaround to reset the Synaptics SDP FIFO before
		 * enabling the first stream.
		 */
		if (!stream->link->link_status.link_active &&
		    memcmp(stream->link->dpcd_caps.branch_dev_name,
			   (int8_t *)SYNAPTICS_DEVICE_ID, 4) == 0)
			apply_synaptics_fifo_reset_wa(aux);

		ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1);
		DRM_INFO("MST_DSC Send DSC enable to synaptics\n");

	} else {
		/* The Synaptics hub does not support virtual DPCD, and
		 * external monitors may show garbage while DSC is being
		 * disabled, so only disable DSC once the entire link status
		 * has gone down.
		 */
		if (!stream->link->link_status.link_active) {
			ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1);
			DRM_INFO("MST_DSC Send DSC disable to synaptics\n");
		}
	}

	return ret;
}
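
/*
 * Enable/disable DSC for a stream. For MST, ordering matters: DSC
 * pass-through is enabled before the decoder when enabling, and disabled
 * only after the decoder when disabling. SST/eDP sinks and DP-HDMI PCONs
 * are programmed directly over DPCD.
 */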
bool dm_helpers_dp_write_dsc_enable(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		bool enable)
{
	static const uint8_t DSC_DISABLE;
	static const uint8_t DSC_DECODING = 0x01;
	static const uint8_t DSC_PASSTHROUGH = 0x02;

	struct amdgpu_dm_connector *aconnector =
		(struct amdgpu_dm_connector *)stream->dm_stream_context;
	struct drm_device *dev = aconnector->base.dev;
	struct drm_dp_mst_port *port;
	uint8_t enable_dsc = enable ? DSC_DECODING : DSC_DISABLE;
	uint8_t enable_passthrough = enable ? DSC_PASSTHROUGH : DSC_DISABLE;
	uint8_t ret = 0;

	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		if (!aconnector->dsc_aux)
			return false;

		// apply w/a to synaptics
		if (needs_dsc_aux_workaround(aconnector->dc_link) &&
		    (aconnector->mst_downstream_port_present.byte & 0x7) != 0x3)
			return write_dsc_enable_synaptics_non_virtual_dpcd_mst(
				aconnector->dsc_aux, stream, enable_dsc);

		port = aconnector->mst_output_port;

		if (enable) {
			if (port->passthrough_aux) {
				ret = drm_dp_dpcd_write(port->passthrough_aux,
							DP_DSC_ENABLE,
							&enable_passthrough, 1);
				drm_dbg_dp(dev,
					   "MST_DSC Sent DSC pass-through enable to virtual dpcd port, ret = %u\n",
					   ret);
			}

			ret = drm_dp_dpcd_write(aconnector->dsc_aux,
						DP_DSC_ENABLE, &enable_dsc, 1);
			drm_dbg_dp(dev,
				   "MST_DSC Sent DSC decoding enable to %s port, ret = %u\n",
				   (port->passthrough_aux) ? "remote RX" :
				   "virtual dpcd",
				   ret);
		} else {
			ret = drm_dp_dpcd_write(aconnector->dsc_aux,
						DP_DSC_ENABLE, &enable_dsc, 1);
			drm_dbg_dp(dev,
				   "MST_DSC Sent DSC decoding disable to %s port, ret = %u\n",
				   (port->passthrough_aux) ? "remote RX" :
				   "virtual dpcd",
				   ret);

			if (port->passthrough_aux) {
				ret = drm_dp_dpcd_write(port->passthrough_aux,
							DP_DSC_ENABLE,
							&enable_passthrough, 1);
				drm_dbg_dp(dev,
					   "MST_DSC Sent DSC pass-through disable to virtual dpcd port, ret = %u\n",
					   ret);
			}
		}
	}

	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) {
		if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
			ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
			drm_dbg_dp(dev,
				   "SST_DSC Send DSC %s to SST RX\n",
				   enable_dsc ? "enable" : "disable");
		} else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
			ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
			drm_dbg_dp(dev,
				   "SST_DSC Send DSC %s to DP-HDMI PCON\n",
				   enable_dsc ? "enable" : "disable");
		}
	}

	return ret;
}

bool dm_helpers_is_dp_sink_present(struct dc_link *link)
{
	bool dp_sink_present;
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return true;
	}

	mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex);
	dp_sink_present = dc_link_is_dp_sink_present(link);
	mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex);
	return dp_sink_present;
}
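
/*
 * Read the sink's EDID over AUX (native DP) or DDC (HDMI), retrying up to
 * three times on a checksum failure, and report the real checksum back to
 * the sink per DP Compliance Test 4.2.2.6 when an EDID_READ test request
 * is pending.
 */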
enum dc_edid_status dm_helpers_read_local_edid(
		struct dc_context *ctx,
		struct dc_link *link,
		struct dc_sink *sink)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct drm_connector *connector = &aconnector->base;
	struct i2c_adapter *ddc;
	int retry = 3;
	enum dc_edid_status edid_status;
	struct edid *edid;

	if (link->aux_mode)
		ddc = &aconnector->dm_dp_aux.aux.ddc;
	else
		ddc = &aconnector->i2c->base;

	/* Some dongles read the EDID incorrectly the first time;
	 * verify the checksum and retry to make sure the correct
	 * EDID was read.
	 */
	do {
		edid = drm_get_edid(&aconnector->base, ddc);

		/* DP Compliance Test 4.2.2.6 */
		if (link->aux_mode && connector->edid_corrupt)
			drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum);

		if (!edid && connector->edid_corrupt) {
			connector->edid_corrupt = false;
			return EDID_BAD_CHECKSUM;
		}

		if (!edid)
			return EDID_NO_RESPONSE;

		sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
		memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);

		/* We don't need the original edid anymore */
		kfree(edid);

		edid_status = dm_helpers_parse_edid_caps(
						link,
						&sink->dc_edid,
						&sink->edid_caps);

	} while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);

	if (edid_status != EDID_OK)
		DRM_ERROR("EDID err: %d, on connector: %s",
			  edid_status,
			  aconnector->base.name);
	if (link->aux_mode) {
		union test_request test_request = {0};
		union test_response test_response = {0};

		dm_helpers_dp_read_dpcd(ctx,
					link,
					DP_TEST_REQUEST,
					&test_request.raw,
					sizeof(union test_request));

		if (!test_request.bits.EDID_READ)
			return edid_status;

		test_response.bits.EDID_CHECKSUM_WRITE = 1;

		dm_helpers_dp_write_dpcd(ctx,
					 link,
					 DP_TEST_EDID_CHECKSUM,
					 &sink->dc_edid.raw_edid[sink->dc_edid.length-1],
					 1);

		dm_helpers_dp_write_dpcd(ctx,
					 link,
					 DP_TEST_RESPONSE,
					 &test_response.raw,
					 sizeof(test_response));
	}

	return edid_status;
}

int dm_helper_dmub_aux_transfer_sync(
		struct dc_context *ctx,
		const struct dc_link *link,
		struct aux_payload *payload,
		enum aux_return_code_type *operation_result)
{
	if (!link->hpd_status) {
		*operation_result = AUX_RET_ERROR_HPD_DISCON;
		return -1;
	}

	return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link->link_index, payload,
							operation_result);
}

int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,
		const struct dc_link *link,
		struct set_config_cmd_payload *payload,
		enum set_config_status *operation_result)
{
	return amdgpu_dm_process_dmub_set_config_sync(ctx, link->link_index, payload,
						      operation_result);
}

void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
{
	/* TODO: something */
}

void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us)
{
	// TODO:
	//amdgpu_device_gpu_recover(dc_context->driver-context, NULL);
}

void dm_helpers_init_panel_settings(
	struct dc_context *ctx,
	struct dc_panel_config *panel_config,
	struct dc_sink *sink)
{
	// Extra Panel Power Sequence
	panel_config->pps.extra_t3_ms = sink->edid_caps.panel_patch.extra_t3_ms;
	panel_config->pps.extra_t7_ms = sink->edid_caps.panel_patch.extra_t7_ms;
	panel_config->pps.extra_delay_backlight_off = sink->edid_caps.panel_patch.extra_delay_backlight_off;
	panel_config->pps.extra_post_t7_ms = 0;
	panel_config->pps.extra_pre_t11_ms = 0;
	panel_config->pps.extra_t12_ms = sink->edid_caps.panel_patch.extra_t12_ms;
	panel_config->pps.extra_post_OUI_ms = 0;
	// Feature DSC
	panel_config->dsc.disable_dsc_edp = false;
	panel_config->dsc.force_dsc_edp_policy = 0;
}
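
/* Apply debug-mask overrides on top of the per-panel defaults above. */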
void dm_helpers_override_panel_settings(
	struct dc_context *ctx,
	struct dc_panel_config *panel_config)
{
	// Feature DSC
	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		panel_config->dsc.disable_dsc_edp = true;
}

void *dm_helpers_allocate_gpu_mem(
		struct dc_context *ctx,
		enum dc_gpu_mem_alloc_type type,
		size_t size,
		long long *addr)
{
	struct amdgpu_device *adev = ctx->driver_context;

	return dm_allocate_gpu_mem(adev, type, size, addr);
}

void dm_helpers_free_gpu_mem(
		struct dc_context *ctx,
		enum dc_gpu_mem_alloc_type type,
		void *pvMem)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct dal_allocation *da;

	/* walk the da list in DM */
	list_for_each_entry(da, &adev->dm.da_list, list) {
		if (pvMem == da->cpu_ptr) {
			amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
			list_del(&da->list);
			kfree(da);
			break;
		}
	}
}

bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable)
{
	enum dc_irq_source irq_source;
	bool ret;

	irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX;

	ret = dc_interrupt_set(ctx->dc, irq_source, enable);

	DRM_DEBUG_DRIVER("Dmub trace irq %sabling: r=%d\n",
			 enable ? "en" : "dis", ret);
	return ret;
}

void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)
{
	/* TODO: virtual DPCD */
	struct dc_link *link = stream->link;
	union down_spread_ctrl old_downspread;
	union down_spread_ctrl new_downspread;

	if (link->aux_access_disabled)
		return;

	if (!dm_helpers_dp_read_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
				     &old_downspread.raw,
				     sizeof(old_downspread)))
		return;

	new_downspread.raw = old_downspread.raw;
	new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
		(stream->ignore_msa_timing_param) ? 1 : 0;

	if (new_downspread.raw != old_downspread.raw)
		dm_helpers_dp_write_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
					 &new_downspread.raw,
					 sizeof(new_downspread));
}
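
/*
 * Service a DP compliance automated test request: map the DPCD-requested
 * pattern, color space and BPC onto DC enums, retime the primary pipe if
 * the requested format differs from the active one, then program the test
 * pattern on the link.
 */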
bool dm_helpers_dp_handle_test_pattern_request(
		struct dc_context *ctx,
		const struct dc_link *link,
		union link_test_pattern dpcd_test_pattern,
		union test_misc dpcd_test_params)
{
	enum dp_test_pattern test_pattern;
	enum dp_test_pattern_color_space test_pattern_color_space =
			DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;
	enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED;
	enum dc_pixel_encoding requestPixelEncoding = PIXEL_ENCODING_UNDEFINED;
	struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
	struct pipe_ctx *pipe_ctx = NULL;
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct drm_device *dev = aconnector->base.dev;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (pipes[i].stream == NULL)
			continue;

		if (pipes[i].stream->link == link && !pipes[i].top_pipe &&
		    !pipes[i].prev_odm_pipe) {
			pipe_ctx = &pipes[i];
			break;
		}
	}

	if (pipe_ctx == NULL)
		return false;

	switch (dpcd_test_pattern.bits.PATTERN) {
	case LINK_TEST_PATTERN_COLOR_RAMP:
		test_pattern = DP_TEST_PATTERN_COLOR_RAMP;
		break;
	case LINK_TEST_PATTERN_VERTICAL_BARS:
		test_pattern = DP_TEST_PATTERN_VERTICAL_BARS;
		break; /* black and white */
	case LINK_TEST_PATTERN_COLOR_SQUARES:
		test_pattern = (dpcd_test_params.bits.DYN_RANGE ==
				TEST_DYN_RANGE_VESA ?
				DP_TEST_PATTERN_COLOR_SQUARES :
				DP_TEST_PATTERN_COLOR_SQUARES_CEA);
		break;
	default:
		test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
		break;
	}

	if (dpcd_test_params.bits.CLR_FORMAT == 0)
		test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB;
	else
		test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
				DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
				DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;

	switch (dpcd_test_params.bits.BPC) {
	case 0: // 6 bits
		requestColorDepth = COLOR_DEPTH_666;
		break;
	case 1: // 8 bits
		requestColorDepth = COLOR_DEPTH_888;
		break;
	case 2: // 10 bits
		requestColorDepth = COLOR_DEPTH_101010;
		break;
	case 3: // 12 bits
		requestColorDepth = COLOR_DEPTH_121212;
		break;
	default:
		break;
	}

	switch (dpcd_test_params.bits.CLR_FORMAT) {
	case 0:
		requestPixelEncoding = PIXEL_ENCODING_RGB;
		break;
	case 1:
		requestPixelEncoding = PIXEL_ENCODING_YCBCR422;
		break;
	case 2:
		requestPixelEncoding = PIXEL_ENCODING_YCBCR444;
		break;
	default:
		requestPixelEncoding = PIXEL_ENCODING_RGB;
		break;
	}

	if ((requestColorDepth != COLOR_DEPTH_UNDEFINED
	     && pipe_ctx->stream->timing.display_color_depth != requestColorDepth)
	    || (requestPixelEncoding != PIXEL_ENCODING_UNDEFINED
		&& pipe_ctx->stream->timing.pixel_encoding != requestPixelEncoding)) {
		drm_dbg(dev,
			"original bpc %d pix encoding %d, changing to %d %d\n",
			pipe_ctx->stream->timing.display_color_depth,
			pipe_ctx->stream->timing.pixel_encoding,
			requestColorDepth,
			requestPixelEncoding);
		pipe_ctx->stream->timing.display_color_depth = requestColorDepth;
		pipe_ctx->stream->timing.pixel_encoding = requestPixelEncoding;

		dc_link_update_dsc_config(pipe_ctx);

		aconnector->timing_changed = true;
		/* store current timing */
		if (aconnector->timing_requested)
			*aconnector->timing_requested = pipe_ctx->stream->timing;
		else
			drm_err(dev, "timing storage failed\n");
	}

	pipe_ctx->stream->test_pattern.type = test_pattern;
	pipe_ctx->stream->test_pattern.color_space = test_pattern_color_space;

	dc_link_dp_set_test_pattern(
		(struct dc_link *) link,
		test_pattern,
		test_pattern_color_space,
		NULL,
		NULL,
		0);

	return false;
}

void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
{
	// TODO
}

void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable)
{
	struct amdgpu_device *adev = ctx->driver_context;

	if (adev->dm.idle_workqueue) {
		adev->dm.idle_workqueue->enable = enable;
		if (enable && !adev->dm.idle_workqueue->running && amdgpu_dm_is_headless(adev))
			schedule_work(&adev->dm.idle_workqueue->work);
	}
}

void dm_helpers_dp_mst_update_branch_bandwidth(
		struct dc_context *ctx,
		struct dc_link *link)
{
	// TODO
}

static bool dm_is_freesync_pcon_whitelist(const uint32_t branch_dev_id)
{
	bool ret_val = false;

	switch (branch_dev_id) {
	case DP_BRANCH_DEVICE_ID_0060AD:
	case DP_BRANCH_DEVICE_ID_00E04C:
	case DP_BRANCH_DEVICE_ID_90CC24:
		ret_val = true;
		break;
	default:
		break;
	}

	return ret_val;
}
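
/*
 * FreeSync over a DP-HDMI PCON is only reported when the converter
 * advertises Adaptive-Sync SDP support, tolerates invalid MSA timing
 * parameters, and its branch device ID is in the whitelist above.
 */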
enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link)
{
	struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
	enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;

	switch (dpcd_caps->dongle_type) {
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
		if (dpcd_caps->adaptive_sync_caps.dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT &&
		    dpcd_caps->allow_invalid_MSA_timing_param &&
		    dm_is_freesync_pcon_whitelist(dpcd_caps->branch_dev_id))
			as_type = FREESYNC_TYPE_PCON_IN_WHITELIST;
		break;
	default:
		break;
	}

	return as_type;
}

bool dm_helpers_is_fullscreen(struct dc_context *ctx, struct dc_stream_state *stream)
{
	// TODO
	return false;
}

bool dm_helpers_is_hdr_on(struct dc_context *ctx, struct dc_stream_state *stream)
{
	// TODO
	return false;
}