1 /* 2 * Copyright 2015 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: AMD
 *
 */

#include <acpi/video.h>

#include <linux/string.h>
#include <linux/acpi.h>
#include <linux/i2c.h>

#include <drm/drm_atomic.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>

#include "dm_services.h"
#include "amdgpu.h"
#include "dc.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_dm_mst_types.h"
#include "dpcd_defs.h"
#include "dc/inc/core_types.h"

#include "dm_helpers.h"
#include "ddc_service_types.h"

/* Pack the EDID manufacturer id and product code into one 32-bit panel id,
 * in the same layout produced by drm_edid_encode_panel_id(), so the two can
 * be compared directly in apply_edid_quirks().
 */
static u32 edid_extract_panel_id(struct edid *edid)
{
	return (u32)edid->mfg_id[0] << 24 |
	       (u32)edid->mfg_id[1] << 16 |
	       (u32)EDID_PRODUCT_ID(edid);
}

/* Set per-panel workaround flags in @edid_caps for known-problematic
 * monitors, keyed by the packed panel id from the EDID.
 */
static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
{
	uint32_t panel_id = edid_extract_panel_id(edid);

	switch (panel_id) {
	/* Workaround for some monitors which does not work well with FAMS */
	case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E):
	case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053):
	case drm_edid_encode_panel_id('S', 'A', 'M', 0x71AC):
		DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id);
		edid_caps->panel_patch.disable_fams = true;
		break;
	/* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */
	case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB):
	case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B):
	case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A):
	case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1):
	case drm_edid_encode_panel_id('M', 'S', 'F', 0x1003):
		DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
		edid_caps->panel_patch.remove_sink_ext_caps = true;
		break;
	case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
		DRM_DEBUG_DRIVER("Disabling VSC on monitor with panel id %X\n", panel_id);
		edid_caps->panel_patch.disable_colorimetry = true;
		break;
	default:
		return;
	}
}

/**
 * dm_helpers_parse_edid_caps() - Parse edid caps
 *
 * @link: current detected link
 * @edid: [in] pointer to edid
 * @edid_caps: [out] filled with ids, monitor name, audio modes and quirks
 *
 * Return: EDID_OK, EDID_BAD_INPUT on NULL args, or EDID_BAD_CHECKSUM
 * (note: parsing still proceeds on a bad checksum; the status is only
 * recorded and returned).
 */
enum dc_edid_status dm_helpers_parse_edid_caps(
		struct dc_link *link,
		const struct dc_edid *edid,
		struct dc_edid_caps *edid_caps)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct drm_connector *connector = &aconnector->base;
	struct edid *edid_buf = edid ? (struct edid *) edid->raw_edid : NULL;
	struct cea_sad *sads;
	int sad_count = -1;
	int sadb_count = -1;
	int i = 0;
	uint8_t *sadb = NULL;

	enum dc_edid_status result = EDID_OK;

	if (!edid_caps || !edid)
		return EDID_BAD_INPUT;

	if (!drm_edid_is_valid(edid_buf))
		result = EDID_BAD_CHECKSUM;

	/* EDID stores mfg/product ids little-endian byte pairs */
	edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
			((uint16_t) edid_buf->mfg_id[1])<<8;
	edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
			((uint16_t) edid_buf->prod_code[1])<<8;
	edid_caps->serial_number = edid_buf->serial;
	edid_caps->manufacture_week = edid_buf->mfg_week;
	edid_caps->manufacture_year = edid_buf->mfg_year;

	drm_edid_get_monitor_name(edid_buf,
				  edid_caps->display_name,
				  AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	edid_caps->edid_hdmi = connector->display_info.is_hdmi;

	apply_edid_quirks(edid_buf, edid_caps);

	/* Copy CEA short audio descriptors, capped at DC's table size */
	sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
	if (sad_count <= 0)
		return result;

	edid_caps->audio_mode_count = min(sad_count, DC_MAX_AUDIO_DESC_COUNT);
	for (i = 0; i < edid_caps->audio_mode_count; ++i) {
		struct cea_sad *sad = &sads[i];

		edid_caps->audio_modes[i].format_code = sad->format;
		edid_caps->audio_modes[i].channel_count = sad->channels + 1;
		edid_caps->audio_modes[i].sample_rate = sad->freq;
		edid_caps->audio_modes[i].sample_size = sad->byte2;
	}

	sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);

	if (sadb_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
		sadb_count = 0;
	}

	if (sadb_count)
		edid_caps->speaker_flags = sadb[0];
	else
		edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;

	kfree(sads);
	kfree(sadb);

	return result;
}

/* Convert the DRM MST payload bookkeeping into DC's stream allocation
 * table: apply the add/remove described by @target_payload to a local copy
 * of link->mst_stream_alloc_table, then compact all still-active entries
 * (vcp_id and slot_count non-zero) into @table.
 */
static void
fill_dc_mst_payload_table_from_drm(struct dc_link *link,
				   bool enable,
				   struct drm_dp_mst_atomic_payload *target_payload,
				   struct dc_dp_mst_stream_allocation_table *table)
{
	struct dc_dp_mst_stream_allocation_table new_table = { 0 };
	struct dc_dp_mst_stream_allocation *sa;
	struct link_mst_stream_allocation_table copy_of_link_table =
										link->mst_stream_alloc_table;

	int i;
	int current_hw_table_stream_cnt = copy_of_link_table.stream_count;
	struct link_mst_stream_allocation *dc_alloc;

	/* TODO: refactor to set link->mst_stream_alloc_table directly if possible.*/
	if (enable) {
		/* Append the new payload after the existing HW entries */
		dc_alloc =
		&copy_of_link_table.stream_allocations[current_hw_table_stream_cnt];
		dc_alloc->vcp_id = target_payload->vcpi;
		dc_alloc->slot_count = target_payload->time_slots;
	} else {
		/* Zero out the entry matching the removed payload's VCPI */
		for (i = 0; i < copy_of_link_table.stream_count; i++) {
			dc_alloc =
			&copy_of_link_table.stream_allocations[i];

			if (dc_alloc->vcp_id == target_payload->vcpi) {
				dc_alloc->vcp_id = 0;
				dc_alloc->slot_count = 0;
				break;
			}
		}
		/* Removing a VCPI that was never allocated indicates a bug */
		ASSERT(i != copy_of_link_table.stream_count);
	}

	/* Fill payload info*/
	for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
		dc_alloc =
		&copy_of_link_table.stream_allocations[i];
		if (dc_alloc->vcp_id > 0 && dc_alloc->slot_count > 0) {
			sa = &new_table.stream_allocations[new_table.stream_count];
			sa->slot_count = dc_alloc->slot_count;
			sa->vcp_id = dc_alloc->vcp_id;
			new_table.stream_count++;
		}
	}

	/* Overwrite the old table */
	*table = new_table;
}

void dm_helpers_dp_update_branch_info(
	struct dc_context *ctx,
	const struct dc_link *link)
{}

/* Reconstruct the payload as it was originally allocated, for use by
 * drm_dp_remove_payload_part2(): the allocated slot count is the gap
 * between this payload's start slot and the next-higher start slot in
 * the topology (or mgr->next_start_slot if it is the last payload).
 */
static void dm_helpers_construct_old_payload(
			struct drm_dp_mst_topology_mgr *mgr,
			struct drm_dp_mst_topology_state *mst_state,
			struct drm_dp_mst_atomic_payload *new_payload,
			struct drm_dp_mst_atomic_payload *old_payload)
{
	struct drm_dp_mst_atomic_payload *pos;
	int pbn_per_slot = dfixed_trunc(mst_state->pbn_div);
	u8 next_payload_vc_start = mgr->next_start_slot;
	u8 payload_vc_start = new_payload->vc_start_slot;
	u8 allocated_time_slots;

	*old_payload = *new_payload;

	/* Set correct time_slots/PBN of old payload.
	 * other fields (delete & dsc_enabled) in
	 * struct drm_dp_mst_atomic_payload are don't care fields
	 * while calling drm_dp_remove_payload_part2()
	 */
	list_for_each_entry(pos, &mst_state->payloads, next) {
		if (pos != new_payload &&
		    pos->vc_start_slot > payload_vc_start &&
		    pos->vc_start_slot < next_payload_vc_start)
			next_payload_vc_start = pos->vc_start_slot;
	}

	allocated_time_slots = next_payload_vc_start - payload_vc_start;

	old_payload->time_slots = allocated_time_slots;
	old_payload->pbn = allocated_time_slots * pbn_per_slot;
}

/*
 * Writes payload allocation table in immediate downstream device.
 */
bool dm_helpers_dp_mst_write_payload_allocation_table(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		struct dc_dp_mst_stream_allocation_table *proposed_table,
		bool enable)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_state *mst_state;
	struct drm_dp_mst_atomic_payload *target_payload, *new_payload, old_payload;
	struct drm_dp_mst_topology_mgr *mst_mgr;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
	/* Accessing the connector state is required for vcpi_slots allocation
	 * and directly relies on behaviour in commit check
	 * that blocks before commit guaranteeing that the state
	 * is not gonna be swapped while still in use in commit tail
	 */

	if (!aconnector || !aconnector->mst_root)
		return false;

	mst_mgr = &aconnector->mst_root->mst_mgr;
	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
	new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);

	if (enable) {
		target_payload = new_payload;

		/* It's OK for this to fail */
		drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload);
	} else {
		/* construct old payload by VCPI*/
		dm_helpers_construct_old_payload(mst_mgr, mst_state,
						 new_payload, &old_payload);
		target_payload = &old_payload;

		drm_dp_remove_payload_part1(mst_mgr, mst_state, new_payload);
	}

	/* mst_mgr->payloads are VC payload notify MST branch using DPCD or
	 * AUX message. The sequence is slot 1-63 allocated sequence for each
	 * stream. AMD ASIC stream slot allocation should follow the same
	 * sequence. copy DRM MST allocation to dc
	 */
	fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table);

	return true;
}

/*
 * poll pending down reply
 */
void dm_helpers_dp_mst_poll_pending_down_reply(
	struct dc_context *ctx,
	const struct dc_link *link)
{}

/*
 * Clear payload allocation table before enable MST DP link.
 */
void dm_helpers_dp_mst_clear_payload_allocation_table(
	struct dc_context *ctx,
	const struct dc_link *link)
{}

/*
 * Polls for ACT (allocation change trigger) handled and sends
 * ALLOCATE_PAYLOAD message.
 */
enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
		struct dc_context *ctx,
		const struct dc_stream_state *stream)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	int ret;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector || !aconnector->mst_root)
		return ACT_FAILED;

	mst_mgr = &aconnector->mst_root->mst_mgr;

	if (!mst_mgr->mst_state)
		return ACT_FAILED;

	ret = drm_dp_check_act_status(mst_mgr);

	if (ret)
		return ACT_FAILED;

	return ACT_SUCCESS;
}

/* Send the ALLOCATE_PAYLOAD sideband message for the stream's payload
 * (part 2 of the DRM payload sequence) and record success/failure in the
 * connector's MST progress status flags.
 */
void dm_helpers_dp_mst_send_payload_allocation(
		struct dc_context *ctx,
		const struct dc_stream_state *stream)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_state *mst_state;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_atomic_payload *new_payload;
	enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD;
	enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
	int ret = 0;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector || !aconnector->mst_root)
		return;

	mst_mgr = &aconnector->mst_root->mst_mgr;
	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
	new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);

	ret = drm_dp_add_payload_part2(mst_mgr, new_payload);

	if (ret) {
		amdgpu_dm_set_mst_status(&aconnector->mst_status,
			set_flag, false);
	} else {
		amdgpu_dm_set_mst_status(&aconnector->mst_status,
			set_flag, true);
		amdgpu_dm_set_mst_status(&aconnector->mst_status,
			clr_flag, false);
	}
}

/* Finish payload removal (part 2 of the DRM remove sequence) using a
 * reconstructed old payload, then flip the MST progress status flags to
 * "cleared".
 */
void dm_helpers_dp_mst_update_mst_mgr_for_deallocation(
		struct dc_context *ctx,
		const struct dc_stream_state *stream)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_state *mst_state;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_atomic_payload *new_payload, old_payload;
	enum mst_progress_status set_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
	enum mst_progress_status clr_flag = MST_ALLOCATE_NEW_PAYLOAD;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector || !aconnector->mst_root)
		return;

	mst_mgr = &aconnector->mst_root->mst_mgr;
	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
	new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
	dm_helpers_construct_old_payload(mst_mgr, mst_state,
					 new_payload, &old_payload);

	drm_dp_remove_payload_part2(mst_mgr, mst_state, &old_payload, new_payload);

	amdgpu_dm_set_mst_status(&aconnector->mst_status, set_flag, true);
	amdgpu_dm_set_mst_status(&aconnector->mst_status, clr_flag, false);
}

/* Open a DTN log section: goes to the log buffer when one is provided,
 * otherwise straight to dmesg.
 */
void dm_dtn_log_begin(struct dc_context *ctx,
	struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn begin]\n";

	if (!log_ctx) {
		pr_info("%s", msg);
		return;
	}

	dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
}

/* Append a printf-style message to the DTN log buffer, growing the buffer
 * as needed; without a log context the message is redirected to dmesg.
 */
__printf(3, 4)
void dm_dtn_log_append_v(struct dc_context *ctx,
	struct dc_log_buffer_ctx *log_ctx,
	const char *msg, ...)
{
	va_list args;
	size_t total;
	int n;

	if (!log_ctx) {
		/* No context, redirect to dmesg. */
		struct va_format vaf;

		vaf.fmt = msg;
		vaf.va = &args;

		va_start(args, msg);
		pr_info("%pV", &vaf);
		va_end(args);

		return;
	}

	/* Measure the output. */
	va_start(args, msg);
	n = vsnprintf(NULL, 0, msg, args);
	va_end(args);

	if (n <= 0)
		return;

	/* Reallocate the string buffer as needed. */
	total = log_ctx->pos + n + 1;

	if (total > log_ctx->size) {
		/* NOTE(review): buffer comes from kvcalloc() but is freed
		 * with kfree(); kvfree() would be the matching free if the
		 * allocation ever falls back to vmalloc — confirm intended
		 * size range.
		 */
		char *buf = kvcalloc(total, sizeof(char), GFP_KERNEL);

		if (buf) {
			memcpy(buf, log_ctx->buf, log_ctx->pos);
			kfree(log_ctx->buf);

			log_ctx->buf = buf;
			log_ctx->size = total;
		}
	}

	if (!log_ctx->buf)
		return;

	/* Write the formatted string to the log buffer. */
	va_start(args, msg);
	n = vscnprintf(
		log_ctx->buf + log_ctx->pos,
		log_ctx->size - log_ctx->pos,
		msg,
		args);
	va_end(args);

	if (n > 0)
		log_ctx->pos += n;
}

/* Close a DTN log section (counterpart of dm_dtn_log_begin()). */
void dm_dtn_log_end(struct dc_context *ctx,
	struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn end]\n";

	if (!log_ctx) {
		pr_info("%s", msg);
		return;
	}

	dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
}

/* Start the MST topology manager on the link's connector; when @boot is
 * set the start is skipped (resume path) and reported as success.
 */
bool dm_helpers_dp_mst_start_top_mgr(
	struct dc_context *ctx,
	const struct dc_link *link,
	bool boot)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	int ret;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	if (boot) {
		DRM_INFO("DM_MST: Differing MST start on aconnector: %p [id: %d]\n",
			 aconnector, aconnector->base.base.id);
		return true;
	}

	DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
		 aconnector, aconnector->base.base.id);

	ret =
drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); 525 if (ret < 0) { 526 DRM_ERROR("DM_MST: Failed to set the device into MST mode!"); 527 return false; 528 } 529 530 DRM_INFO("DM_MST: DP%x, %d-lane link detected\n", aconnector->mst_mgr.dpcd[0], 531 aconnector->mst_mgr.dpcd[2] & DP_MAX_LANE_COUNT_MASK); 532 533 return true; 534 } 535 536 bool dm_helpers_dp_mst_stop_top_mgr( 537 struct dc_context *ctx, 538 struct dc_link *link) 539 { 540 struct amdgpu_dm_connector *aconnector = link->priv; 541 542 if (!aconnector) { 543 DRM_ERROR("Failed to find connector for link!"); 544 return false; 545 } 546 547 DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n", 548 aconnector, aconnector->base.base.id); 549 550 if (aconnector->mst_mgr.mst_state == true) { 551 drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false); 552 link->cur_link_settings.lane_count = 0; 553 } 554 555 return false; 556 } 557 558 bool dm_helpers_dp_read_dpcd( 559 struct dc_context *ctx, 560 const struct dc_link *link, 561 uint32_t address, 562 uint8_t *data, 563 uint32_t size) 564 { 565 566 struct amdgpu_dm_connector *aconnector = link->priv; 567 568 if (!aconnector) 569 return false; 570 571 return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address, data, 572 size) == size; 573 } 574 575 bool dm_helpers_dp_write_dpcd( 576 struct dc_context *ctx, 577 const struct dc_link *link, 578 uint32_t address, 579 const uint8_t *data, 580 uint32_t size) 581 { 582 struct amdgpu_dm_connector *aconnector = link->priv; 583 584 if (!aconnector) 585 return false; 586 587 return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux, 588 address, (uint8_t *)data, size) > 0; 589 } 590 591 bool dm_helpers_submit_i2c( 592 struct dc_context *ctx, 593 const struct dc_link *link, 594 struct i2c_command *cmd) 595 { 596 struct amdgpu_dm_connector *aconnector = link->priv; 597 struct i2c_msg *msgs; 598 int i = 0; 599 int num = cmd->number_of_payloads; 600 bool result; 601 602 if (!aconnector) { 603 DRM_ERROR("Failed 
to find connector for link!"); 604 return false; 605 } 606 607 msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL); 608 609 if (!msgs) 610 return false; 611 612 for (i = 0; i < num; i++) { 613 msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD; 614 msgs[i].addr = cmd->payloads[i].address; 615 msgs[i].len = cmd->payloads[i].length; 616 msgs[i].buf = cmd->payloads[i].data; 617 } 618 619 result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num; 620 621 kfree(msgs); 622 623 return result; 624 } 625 626 static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, 627 bool is_write_cmd, 628 unsigned char cmd, 629 unsigned int length, 630 unsigned int offset, 631 unsigned char *data) 632 { 633 bool success = false; 634 unsigned char rc_data[16] = {0}; 635 unsigned char rc_offset[4] = {0}; 636 unsigned char rc_length[2] = {0}; 637 unsigned char rc_cmd = 0; 638 unsigned char rc_result = 0xFF; 639 unsigned char i = 0; 640 int ret; 641 642 if (is_write_cmd) { 643 // write rc data 644 memmove(rc_data, data, length); 645 ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data)); 646 if (ret < 0) 647 goto err; 648 } 649 650 // write rc offset 651 rc_offset[0] = (unsigned char) offset & 0xFF; 652 rc_offset[1] = (unsigned char) (offset >> 8) & 0xFF; 653 rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF; 654 rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF; 655 ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset)); 656 if (ret < 0) 657 goto err; 658 659 // write rc length 660 rc_length[0] = (unsigned char) length & 0xFF; 661 rc_length[1] = (unsigned char) (length >> 8) & 0xFF; 662 ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length)); 663 if (ret < 0) 664 goto err; 665 666 // write rc cmd 667 rc_cmd = cmd | 0x80; 668 ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd)); 669 if (ret < 0) 670 goto err; 671 672 // poll until active is 0 673 for (i = 0; i < 10; i++) { 674 
drm_dp_dpcd_read(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd)); 675 if (rc_cmd == cmd) 676 // active is 0 677 break; 678 msleep(10); 679 } 680 681 // read rc result 682 drm_dp_dpcd_read(aux, SYNAPTICS_RC_RESULT, &rc_result, sizeof(rc_result)); 683 success = (rc_result == 0); 684 685 if (success && !is_write_cmd) { 686 // read rc data 687 drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length); 688 } 689 690 drm_dbg_dp(aux->drm_dev, "success = %d\n", success); 691 692 return success; 693 694 err: 695 DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret); 696 return false; 697 } 698 699 static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) 700 { 701 unsigned char data[16] = {0}; 702 703 drm_dbg_dp(aux->drm_dev, "Start\n"); 704 705 // Step 2 706 data[0] = 'P'; 707 data[1] = 'R'; 708 data[2] = 'I'; 709 data[3] = 'U'; 710 data[4] = 'S'; 711 712 if (!execute_synaptics_rc_command(aux, true, 0x01, 5, 0, data)) 713 return; 714 715 // Step 3 and 4 716 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data)) 717 return; 718 719 data[0] &= (~(1 << 1)); // set bit 1 to 0 720 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data)) 721 return; 722 723 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data)) 724 return; 725 726 data[0] &= (~(1 << 1)); // set bit 1 to 0 727 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220D98, data)) 728 return; 729 730 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data)) 731 return; 732 733 data[0] &= (~(1 << 1)); // set bit 1 to 0 734 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data)) 735 return; 736 737 // Step 3 and 5 738 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data)) 739 return; 740 741 data[0] |= (1 << 1); // set bit 1 to 1 742 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data)) 743 return; 744 745 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data)) 746 return; 747 
748 data[0] |= (1 << 1); // set bit 1 to 1 749 750 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data)) 751 return; 752 753 data[0] |= (1 << 1); // set bit 1 to 1 754 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data)) 755 return; 756 757 // Step 6 758 if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL)) 759 return; 760 761 drm_dbg_dp(aux->drm_dev, "Done\n"); 762 } 763 764 /* MST Dock */ 765 static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA"; 766 767 static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst( 768 struct drm_dp_aux *aux, 769 const struct dc_stream_state *stream, 770 bool enable) 771 { 772 uint8_t ret = 0; 773 774 drm_dbg_dp(aux->drm_dev, 775 "MST_DSC Configure DSC to non-virtual dpcd synaptics\n"); 776 777 if (enable) { 778 /* When DSC is enabled on previous boot and reboot with the hub, 779 * there is a chance that Synaptics hub gets stuck during reboot sequence. 780 * Applying a workaround to reset Synaptics SDP fifo before enabling the first stream 781 */ 782 if (!stream->link->link_status.link_active && 783 memcmp(stream->link->dpcd_caps.branch_dev_name, 784 (int8_t *)SYNAPTICS_DEVICE_ID, 4) == 0) 785 apply_synaptics_fifo_reset_wa(aux); 786 787 ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1); 788 DRM_INFO("MST_DSC Send DSC enable to synaptics\n"); 789 790 } else { 791 /* Synaptics hub not support virtual dpcd, 792 * external monitor occur garbage while disable DSC, 793 * Disable DSC only when entire link status turn to false, 794 */ 795 if (!stream->link->link_status.link_active) { 796 ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1); 797 DRM_INFO("MST_DSC Send DSC disable to synaptics\n"); 798 } 799 } 800 801 return ret; 802 } 803 804 bool dm_helpers_dp_write_dsc_enable( 805 struct dc_context *ctx, 806 const struct dc_stream_state *stream, 807 bool enable) 808 { 809 static const uint8_t DSC_DISABLE; 810 static const uint8_t DSC_DECODING = 0x01; 811 static const uint8_t 
			DSC_PASSTHROUGH = 0x02;

	struct amdgpu_dm_connector *aconnector =
		(struct amdgpu_dm_connector *)stream->dm_stream_context;
	struct drm_device *dev = aconnector->base.dev;
	struct drm_dp_mst_port *port;
	uint8_t enable_dsc = enable ? DSC_DECODING : DSC_DISABLE;
	uint8_t enable_passthrough = enable ? DSC_PASSTHROUGH : DSC_DISABLE;
	uint8_t ret = 0;

	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		if (!aconnector->dsc_aux)
			return false;

		// apply w/a to synaptics
		if (needs_dsc_aux_workaround(aconnector->dc_link) &&
		    (aconnector->mst_downstream_port_present.byte & 0x7) != 0x3)
			return write_dsc_enable_synaptics_non_virtual_dpcd_mst(
				aconnector->dsc_aux, stream, enable_dsc);

		port = aconnector->mst_output_port;

		if (enable) {
			/* Enable order: pass-through first, then decoding */
			if (port->passthrough_aux) {
				ret = drm_dp_dpcd_write(port->passthrough_aux,
							DP_DSC_ENABLE,
							&enable_passthrough, 1);
				drm_dbg_dp(dev,
					   "MST_DSC Sent DSC pass-through enable to virtual dpcd port, ret = %u\n",
					   ret);
			}

			ret = drm_dp_dpcd_write(aconnector->dsc_aux,
						DP_DSC_ENABLE, &enable_dsc, 1);
			drm_dbg_dp(dev,
				   "MST_DSC Sent DSC decoding enable to %s port, ret = %u\n",
				   (port->passthrough_aux) ? "remote RX" :
				   "virtual dpcd",
				   ret);
		} else {
			/* Disable order is reversed: decoding first, then pass-through */
			ret = drm_dp_dpcd_write(aconnector->dsc_aux,
						DP_DSC_ENABLE, &enable_dsc, 1);
			drm_dbg_dp(dev,
				   "MST_DSC Sent DSC decoding disable to %s port, ret = %u\n",
				   (port->passthrough_aux) ? "remote RX" :
				   "virtual dpcd",
				   ret);

			if (port->passthrough_aux) {
				ret = drm_dp_dpcd_write(port->passthrough_aux,
							DP_DSC_ENABLE,
							&enable_passthrough, 1);
				drm_dbg_dp(dev,
					   "MST_DSC Sent DSC pass-through disable to virtual dpcd port, ret = %u\n",
					   ret);
			}
		}
	}

	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) {
		if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
			ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
			drm_dbg_dp(dev,
				   "SST_DSC Send DSC %s to SST RX\n",
				   enable_dsc ? "enable" : "disable");
		} else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
			ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
			drm_dbg_dp(dev,
				   "SST_DSC Send DSC %s to DP-HDMI PCON\n",
				   enable_dsc ? "enable" : "disable");
		}
	}

	return ret;
}

/* Probe for a DP sink under the connector's AUX hw_mutex.
 * Returns true (sink present) when no connector is attached.
 */
bool dm_helpers_is_dp_sink_present(struct dc_link *link)
{
	bool dp_sink_present;
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		BUG_ON("Failed to find connector for link!");
		return true;
	}

	mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex);
	dp_sink_present = dc_link_is_dp_sink_present(link);
	mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex);
	return dp_sink_present;
}

/* drm_edid_read_custom() callback: copy one EDID block out of the full
 * EDID that the BIOS exposes through ACPI (_DDC).
 */
static int
dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len)
{
	struct drm_connector *connector = data;
	struct acpi_device *acpidev = ACPI_COMPANION(connector->dev->dev);
	/* NOTE(review): 'start' is an unsigned char, so a block index >= 2
	 * (offset 256+) would wrap — confirm the expected maximum block count.
	 */
	unsigned char start = block * EDID_LENGTH;
	void *edid;
	int r;

	if (!acpidev)
		return -ENODEV;

	/* fetch the entire edid from BIOS */
	r = acpi_video_get_edid(acpidev, ACPI_VIDEO_DISPLAY_LCD, -1, &edid);
	if (r < 0) {
		drm_dbg(connector->dev, "Failed to get EDID from ACPI: %d\n", r);
		return r;
	}
	/* r is the total EDID size; reject reads past the end */
	if (len > r || start > r || start + len > r) {
		r = -EINVAL;
		goto cleanup;
	}

	memcpy(buf, edid + start, len);
	r = 0;

cleanup:
	kfree(edid);

	return r;
}

/* Try to obtain the panel EDID from ACPI; only for internal panels
 * (LVDS/eDP), and never when disabled via debug mask or a forced-off
 * connector. Returns NULL when ACPI EDID is not applicable/available.
 */
static const struct drm_edid *
dm_helpers_read_acpi_edid(struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;

	if (amdgpu_dc_debug_mask & DC_DISABLE_ACPI_EDID)
		return NULL;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_LVDS:
	case DRM_MODE_CONNECTOR_eDP:
		break;
	default:
		return NULL;
	}

	if (connector->force == DRM_FORCE_OFF)
		return NULL;

	return drm_edid_read_custom(connector, dm_helpers_probe_acpi_edid, connector);
}

/* Read the sink's EDID (ACPI first, then DDC), retrying on bad checksum,
 * copy it into the dc_sink, parse caps, and handle the DP compliance
 * EDID-checksum test handshake over DPCD.
 */
enum dc_edid_status dm_helpers_read_local_edid(
		struct dc_context *ctx,
		struct dc_link *link,
		struct dc_sink *sink)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct drm_connector *connector = &aconnector->base;
	struct i2c_adapter *ddc;
	int retry = 3;
	enum dc_edid_status edid_status;
	const struct drm_edid *drm_edid;
	const struct edid *edid;

	if (link->aux_mode)
		ddc = &aconnector->dm_dp_aux.aux.ddc;
	else
		ddc = &aconnector->i2c->base;

	/* some dongles read edid incorrectly the first time,
	 * do check sum and retry to make sure read correct edid.
	 */
	do {
		drm_edid = dm_helpers_read_acpi_edid(aconnector);
		if (drm_edid)
			drm_info(connector->dev, "Using ACPI provided EDID for %s\n", connector->name);
		else
			drm_edid = drm_edid_read_ddc(connector, ddc);
		drm_edid_connector_update(connector, drm_edid);

		/* DP Compliance Test 4.2.2.6 */
		if (link->aux_mode && connector->edid_corrupt)
			drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum);

		if (!drm_edid && connector->edid_corrupt) {
			connector->edid_corrupt = false;
			return EDID_BAD_CHECKSUM;
		}

		if (!drm_edid)
			return EDID_NO_RESPONSE;

		edid = drm_edid_raw(drm_edid);	// FIXME: Get rid of drm_edid_raw()
		sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
		memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);

		/* We don't need the original edid anymore */
		drm_edid_free(drm_edid);

		edid_status = dm_helpers_parse_edid_caps(
						link,
						&sink->dc_edid,
						&sink->edid_caps);

	} while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);

	if (edid_status != EDID_OK)
		DRM_ERROR("EDID err: %d, on connector: %s",
				edid_status,
				aconnector->base.name);
	if (link->aux_mode) {
		union test_request test_request = {0};
		union test_response test_response = {0};

		dm_helpers_dp_read_dpcd(ctx,
					link,
					DP_TEST_REQUEST,
					&test_request.raw,
					sizeof(union test_request));

		if (!test_request.bits.EDID_READ)
			return edid_status;

		test_response.bits.EDID_CHECKSUM_WRITE = 1;

		dm_helpers_dp_write_dpcd(ctx,
					link,
					DP_TEST_EDID_CHECKSUM,
					&sink->dc_edid.raw_edid[sink->dc_edid.length-1],
					1);

		dm_helpers_dp_write_dpcd(ctx,
					link,
					DP_TEST_RESPONSE,
					&test_response.raw,
					sizeof(test_response));

	}

	return edid_status;
}
/* Synchronous AUX transfer via the DMUB firmware; fails fast with
 * AUX_RET_ERROR_HPD_DISCON when HPD reports the sink disconnected.
 */
int dm_helper_dmub_aux_transfer_sync(
		struct dc_context *ctx,
		const struct dc_link *link,
		struct aux_payload *payload,
		enum aux_return_code_type *operation_result)
{
	/* An AUX transfer is meaningless with no sink connected */
	if (!link->hpd_status) {
		*operation_result = AUX_RET_ERROR_HPD_DISCON;
		return -1;
	}

	return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link->link_index, payload,
			operation_result);
}

/* Forward a synchronous SET_CONFIG request to the DMUB firmware path. */
int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,
		const struct dc_link *link,
		struct set_config_cmd_payload *payload,
		enum set_config_status *operation_result)
{
	return amdgpu_dm_process_dmub_set_config_sync(ctx, link->link_index, payload,
			operation_result);
}

/* Stub: DCN clock programming is not handled here. */
void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
{
	/* TODO: something */
}

/* Stub: SMU message timeout handling is not implemented yet. */
void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us)
{
	// TODO:
	//amdgpu_device_gpu_recover(dc_context->driver-context, NULL);
}

/*
 * Seed the DC panel config from EDID-derived quirks: extra panel power
 * sequencing delays come from the sink's panel_patch table; the
 * remaining PPS fields and the eDP DSC policy get fixed defaults.
 */
void dm_helpers_init_panel_settings(
	struct dc_context *ctx,
	struct dc_panel_config *panel_config,
	struct dc_sink *sink)
{
	// Extra Panel Power Sequence
	panel_config->pps.extra_t3_ms = sink->edid_caps.panel_patch.extra_t3_ms;
	panel_config->pps.extra_t7_ms = sink->edid_caps.panel_patch.extra_t7_ms;
	panel_config->pps.extra_delay_backlight_off = sink->edid_caps.panel_patch.extra_delay_backlight_off;
	panel_config->pps.extra_post_t7_ms = 0;
	panel_config->pps.extra_pre_t11_ms = 0;
	panel_config->pps.extra_t12_ms = sink->edid_caps.panel_patch.extra_t12_ms;
	panel_config->pps.extra_post_OUI_ms = 0;
	// Feature DSC
	panel_config->dsc.disable_dsc_edp = false;
	panel_config->dsc.force_dsc_edp_policy = 0;
}

/* Apply module-level debug overrides on top of the panel config. */
void dm_helpers_override_panel_settings(
	struct dc_context *ctx,
	struct dc_panel_config *panel_config)
{
	// Feature DSC
	if
 (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		panel_config->dsc.disable_dsc_edp = true;
}

/* Allocate GPU-accessible memory for DC; thin wrapper over dm_allocate_gpu_mem(). */
void *dm_helpers_allocate_gpu_mem(
		struct dc_context *ctx,
		enum dc_gpu_mem_alloc_type type,
		size_t size,
		long long *addr)
{
	struct amdgpu_device *adev = ctx->driver_context;

	return dm_allocate_gpu_mem(adev, type, size, addr);
}

/* Free memory obtained via dm_helpers_allocate_gpu_mem(). */
void dm_helpers_free_gpu_mem(
		struct dc_context *ctx,
		enum dc_gpu_mem_alloc_type type,
		void *pvMem)
{
	struct amdgpu_device *adev = ctx->driver_context;

	dm_free_gpu_mem(adev, type, pvMem);
}

/* Enable/disable the DMCUB outbox interrupt source; returns dc_interrupt_set()'s result. */
bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable)
{
	enum dc_irq_source irq_source;
	bool ret;

	irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX;

	ret = dc_interrupt_set(ctx->dc, irq_source, enable);

	DRM_DEBUG_DRIVER("Dmub trace irq %sabling: r=%d\n",
			 enable ? "en" : "dis", ret);
	return ret;
}

/*
 * Propagate the stream's ignore-MSA-timing setting into the sink's
 * DP_DOWNSPREAD_CTRL register, writing DPCD only when the value
 * actually changes (and only if AUX access is allowed).
 */
void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)
{
	/* TODO: virtual DPCD */
	struct dc_link *link = stream->link;
	union down_spread_ctrl old_downspread;
	union down_spread_ctrl new_downspread;

	if (link->aux_access_disabled)
		return;

	/* Read-modify-write: start from the sink's current value */
	if (!dm_helpers_dp_read_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
			&old_downspread.raw,
			sizeof(old_downspread)))
		return;

	new_downspread.raw = old_downspread.raw;
	new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
		(stream->ignore_msa_timing_param) ?
		1 : 0;

	/* Avoid a DPCD write if nothing changed */
	if (new_downspread.raw != old_downspread.raw)
		dm_helpers_dp_write_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
			&new_downspread.raw,
			sizeof(new_downspread));
}

/*
 * Handle a DP compliance test-pattern request read from DPCD: map the
 * requested pattern / color space / bpc / pixel encoding onto DC enums,
 * retime the stream if the request differs from the current timing, and
 * program the pattern via dc_link_dp_set_test_pattern().
 */
bool dm_helpers_dp_handle_test_pattern_request(
		struct dc_context *ctx,
		const struct dc_link *link,
		union link_test_pattern dpcd_test_pattern,
		union test_misc dpcd_test_params)
{
	enum dp_test_pattern test_pattern;
	enum dp_test_pattern_color_space test_pattern_color_space =
			DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;
	enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED;
	enum dc_pixel_encoding requestPixelEncoding = PIXEL_ENCODING_UNDEFINED;
	struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
	struct pipe_ctx *pipe_ctx = NULL;
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct drm_device *dev = aconnector->base.dev;
	int i;

	/* Find the pipe driving this link that is neither an MPC secondary
	 * (top_pipe set) nor a trailing ODM segment (prev_odm_pipe set).
	 */
	for (i = 0; i < MAX_PIPES; i++) {
		if (pipes[i].stream == NULL)
			continue;

		if (pipes[i].stream->link == link && !pipes[i].top_pipe &&
			!pipes[i].prev_odm_pipe) {
			pipe_ctx = &pipes[i];
			break;
		}
	}

	if (pipe_ctx == NULL)
		return false;

	/* Translate the DPCD-requested pattern into DC's enum */
	switch (dpcd_test_pattern.bits.PATTERN) {
	case LINK_TEST_PATTERN_COLOR_RAMP:
		test_pattern = DP_TEST_PATTERN_COLOR_RAMP;
		break;
	case LINK_TEST_PATTERN_VERTICAL_BARS:
		test_pattern = DP_TEST_PATTERN_VERTICAL_BARS;
		break; /* black and white */
	case LINK_TEST_PATTERN_COLOR_SQUARES:
		test_pattern = (dpcd_test_params.bits.DYN_RANGE ==
				TEST_DYN_RANGE_VESA ?
				DP_TEST_PATTERN_COLOR_SQUARES :
				DP_TEST_PATTERN_COLOR_SQUARES_CEA);
		break;
	default:
		test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
		break;
	}

	/* CLR_FORMAT 0 is RGB; otherwise pick the YCbCr matrix from the coefs bit */
	if (dpcd_test_params.bits.CLR_FORMAT == 0)
		test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB;
	else
		test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
				DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
				DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;

	switch (dpcd_test_params.bits.BPC) {
	case 0: // 6 bits
		requestColorDepth = COLOR_DEPTH_666;
		break;
	case 1: // 8 bits
		requestColorDepth = COLOR_DEPTH_888;
		break;
	case 2: // 10 bits
		requestColorDepth = COLOR_DEPTH_101010;
		break;
	case 3: // 12 bits
		requestColorDepth = COLOR_DEPTH_121212;
		break;
	default:
		break;
	}

	switch (dpcd_test_params.bits.CLR_FORMAT) {
	case 0:
		requestPixelEncoding = PIXEL_ENCODING_RGB;
		break;
	case 1:
		requestPixelEncoding = PIXEL_ENCODING_YCBCR422;
		break;
	case 2:
		requestPixelEncoding = PIXEL_ENCODING_YCBCR444;
		break;
	default:
		requestPixelEncoding = PIXEL_ENCODING_RGB;
		break;
	}

	/* If the test asks for a different depth/encoding than the stream
	 * currently uses, retime the stream and refresh the DSC config.
	 */
	if ((requestColorDepth != COLOR_DEPTH_UNDEFINED
		&& pipe_ctx->stream->timing.display_color_depth != requestColorDepth)
		|| (requestPixelEncoding != PIXEL_ENCODING_UNDEFINED
		&& pipe_ctx->stream->timing.pixel_encoding != requestPixelEncoding)) {
		drm_dbg(dev,
			"original bpc %d pix encoding %d, changing to %d %d\n",
			pipe_ctx->stream->timing.display_color_depth,
			pipe_ctx->stream->timing.pixel_encoding,
			requestColorDepth,
			requestPixelEncoding);
		pipe_ctx->stream->timing.display_color_depth = requestColorDepth;
		pipe_ctx->stream->timing.pixel_encoding = requestPixelEncoding;

		dc_link_update_dsc_config(pipe_ctx);

		aconnector->timing_changed = true;
		/* store current timing */
		if
 (aconnector->timing_requested)
			*aconnector->timing_requested = pipe_ctx->stream->timing;
		else
			drm_err(dev, "timing storage failed\n");

	}

	pipe_ctx->stream->test_pattern.type = test_pattern;
	pipe_ctx->stream->test_pattern.color_space = test_pattern_color_space;

	dc_link_dp_set_test_pattern(
		(struct dc_link *) link,
		test_pattern,
		test_pattern_color_space,
		NULL,
		NULL,
		0);

	return false;
}

/* Stub: PHYD32CLK frequency programming is not handled here. */
void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
{
	// TODO
}

/*
 * Enable/disable the DM idle-detection workqueue; kick the work item
 * when enabling on a headless device and the worker is not running.
 */
void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable)
{
	struct amdgpu_device *adev = ctx->driver_context;

	if (adev->dm.idle_workqueue) {
		adev->dm.idle_workqueue->enable = enable;
		if (enable && !adev->dm.idle_workqueue->running && amdgpu_dm_is_headless(adev))
			schedule_work(&adev->dm.idle_workqueue->work);
	}
}

/* Stub: MST branch bandwidth update is not implemented yet. */
void dm_helpers_dp_mst_update_branch_bandwidth(
		struct dc_context *ctx,
		struct dc_link *link)
{
	// TODO
}

/* Return true for DP branch (PCON) device IDs known to handle FreeSync. */
static bool dm_is_freesync_pcon_whitelist(const uint32_t branch_dev_id)
{
	bool ret_val = false;

	switch (branch_dev_id) {
	case DP_BRANCH_DEVICE_ID_0060AD:
	case DP_BRANCH_DEVICE_ID_00E04C:
	case DP_BRANCH_DEVICE_ID_90CC24:
		ret_val = true;
		break;
	default:
		break;
	}

	return ret_val;
}

/*
 * Classify the link's adaptive-sync support: only DP-to-HDMI converter
 * dongles that advertise adaptive-sync SDP support, allow invalid MSA
 * timing params, and appear on the PCON whitelist qualify.
 */
enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link)
{
	struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
	enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;

	switch (dpcd_caps->dongle_type) {
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
		if (dpcd_caps->adaptive_sync_caps.dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT == true &&
			dpcd_caps->allow_invalid_MSA_timing_param == true &&
			dm_is_freesync_pcon_whitelist(dpcd_caps->branch_dev_id))
			as_type = FREESYNC_TYPE_PCON_IN_WHITELIST;
		break;
	default:
		break;
	}

	return as_type;
}

/* Stub: fullscreen detection not implemented; always reports false. */
bool dm_helpers_is_fullscreen(struct dc_context *ctx, struct dc_stream_state *stream)
{
	// TODO
	return false;
}

/* Stub: HDR state detection not implemented; always reports false. */
bool dm_helpers_is_hdr_on(struct dc_context *ctx, struct dc_stream_state *stream)
{
	// TODO
	return false;
}