1 /* 2 * Copyright 2015 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: AMD 23 * 24 */ 25 26 #include <linux/string.h> 27 #include <linux/acpi.h> 28 #include <linux/i2c.h> 29 30 #include <drm/drm_probe_helper.h> 31 #include <drm/amdgpu_drm.h> 32 #include <drm/drm_edid.h> 33 34 #include "dm_services.h" 35 #include "amdgpu.h" 36 #include "dc.h" 37 #include "amdgpu_dm.h" 38 #include "amdgpu_dm_irq.h" 39 #include "amdgpu_dm_mst_types.h" 40 41 #include "dm_helpers.h" 42 #include "ddc_service_types.h" 43 44 struct monitor_patch_info { 45 unsigned int manufacturer_id; 46 unsigned int product_id; 47 void (*patch_func)(struct dc_edid_caps *edid_caps, unsigned int param); 48 unsigned int patch_param; 49 }; 50 static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param); 51 52 static const struct monitor_patch_info monitor_patch_table[] = { 53 {0x6D1E, 0x5BBF, set_max_dsc_bpp_limit, 15}, 54 {0x6D1E, 0x5B9A, set_max_dsc_bpp_limit, 15}, 55 }; 56 57 static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param) 58 { 59 if (edid_caps) 60 edid_caps->panel_patch.max_dsc_target_bpp_limit = param; 61 } 62 63 static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps) 64 { 65 int i, ret = 0; 66 67 for (i = 0; i < ARRAY_SIZE(monitor_patch_table); i++) 68 if ((edid_caps->manufacturer_id == monitor_patch_table[i].manufacturer_id) 69 && (edid_caps->product_id == monitor_patch_table[i].product_id)) { 70 monitor_patch_table[i].patch_func(edid_caps, monitor_patch_table[i].patch_param); 71 ret++; 72 } 73 74 return ret; 75 } 76 77 /* dm_helpers_parse_edid_caps 78 * 79 * Parse edid caps 80 * 81 * @edid: [in] pointer to edid 82 * edid_caps: [in] pointer to edid caps 83 * @return 84 * void 85 * */ 86 enum dc_edid_status dm_helpers_parse_edid_caps( 87 struct dc_link *link, 88 const struct dc_edid *edid, 89 struct dc_edid_caps *edid_caps) 90 { 91 struct amdgpu_dm_connector *aconnector = link->priv; 92 struct drm_connector *connector = &aconnector->base; 93 struct edid *edid_buf = edid ? (struct edid *) edid->raw_edid : NULL; 94 struct cea_sad *sads; 95 int sad_count = -1; 96 int sadb_count = -1; 97 int i = 0; 98 uint8_t *sadb = NULL; 99 100 enum dc_edid_status result = EDID_OK; 101 102 if (!edid_caps || !edid) 103 return EDID_BAD_INPUT; 104 105 if (!drm_edid_is_valid(edid_buf)) 106 result = EDID_BAD_CHECKSUM; 107 108 edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] | 109 ((uint16_t) edid_buf->mfg_id[1])<<8; 110 edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] | 111 ((uint16_t) edid_buf->prod_code[1])<<8; 112 edid_caps->serial_number = edid_buf->serial; 113 edid_caps->manufacture_week = edid_buf->mfg_week; 114 edid_caps->manufacture_year = edid_buf->mfg_year; 115 116 drm_edid_get_monitor_name(edid_buf, 117 edid_caps->display_name, 118 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS); 119 120 edid_caps->edid_hdmi = connector->display_info.is_hdmi; 121 122 sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads); 123 if (sad_count <= 0) 124 return result; 125 126 edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT; 127 for (i = 0; i < edid_caps->audio_mode_count; ++i) { 128 struct cea_sad *sad = &sads[i]; 129 130 edid_caps->audio_modes[i].format_code = sad->format; 131 edid_caps->audio_modes[i].channel_count = sad->channels + 1; 132 edid_caps->audio_modes[i].sample_rate = sad->freq; 133 edid_caps->audio_modes[i].sample_size = sad->byte2; 134 } 135 136 sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb); 137 138 if (sadb_count < 0) { 139 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count); 140 sadb_count = 0; 141 } 142 143 if (sadb_count) 144 edid_caps->speaker_flags = sadb[0]; 145 else 146 edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION; 147 148 kfree(sads); 149 kfree(sadb); 150 151 amdgpu_dm_patch_edid_caps(edid_caps); 152 153 return result; 154 } 155 156 static void get_payload_table( 157 struct amdgpu_dm_connector *aconnector, 158 struct dp_mst_stream_allocation_table *proposed_table) 159 { 160 int i; 161 struct drm_dp_mst_topology_mgr *mst_mgr = 162 &aconnector->mst_port->mst_mgr; 163 164 mutex_lock(&mst_mgr->payload_lock); 165 166 proposed_table->stream_count = 0; 167 168 /* number of active streams */ 169 for (i = 0; i < mst_mgr->max_payloads; i++) { 170 if (mst_mgr->payloads[i].num_slots == 0) 171 break; /* end of vcp_id table */ 172 173 ASSERT(mst_mgr->payloads[i].payload_state != 174 DP_PAYLOAD_DELETE_LOCAL); 175 176 if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL || 177 mst_mgr->payloads[i].payload_state == 178 DP_PAYLOAD_REMOTE) { 179 180 struct dp_mst_stream_allocation *sa = 181 &proposed_table->stream_allocations[ 182 proposed_table->stream_count]; 183 184 sa->slot_count = mst_mgr->payloads[i].num_slots; 185 sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi; 186 proposed_table->stream_count++; 187 } 188 } 189 190 mutex_unlock(&mst_mgr->payload_lock); 191 } 192 193 void dm_helpers_dp_update_branch_info( 194 struct dc_context *ctx, 195 const struct dc_link *link) 196 {} 197 198 /* 199 * Writes payload allocation table in immediate downstream device. 200 */ 201 bool dm_helpers_dp_mst_write_payload_allocation_table( 202 struct dc_context *ctx, 203 const struct dc_stream_state *stream, 204 struct dp_mst_stream_allocation_table *proposed_table, 205 bool enable) 206 { 207 struct amdgpu_dm_connector *aconnector; 208 struct dm_connector_state *dm_conn_state; 209 struct drm_dp_mst_topology_mgr *mst_mgr; 210 struct drm_dp_mst_port *mst_port; 211 bool ret; 212 u8 link_coding_cap = DP_8b_10b_ENCODING; 213 214 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; 215 /* Accessing the connector state is required for vcpi_slots allocation 216 * and directly relies on behaviour in commit check 217 * that blocks before commit guaranteeing that the state 218 * is not gonna be swapped while still in use in commit tail */ 219 220 if (!aconnector || !aconnector->mst_port) 221 return false; 222 223 dm_conn_state = to_dm_connector_state(aconnector->base.state); 224 225 mst_mgr = &aconnector->mst_port->mst_mgr; 226 227 if (!mst_mgr->mst_state) 228 return false; 229 230 mst_port = aconnector->port; 231 232 #if defined(CONFIG_DRM_AMD_DC_DCN) 233 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link); 234 #endif 235 236 if (enable) { 237 238 ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, 239 dm_conn_state->pbn, 240 dm_conn_state->vcpi_slots); 241 if (!ret) 242 return false; 243 244 } else { 245 drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port); 246 } 247 248 /* It's OK for this to fail */ 249 drm_dp_update_payload_part1(mst_mgr, (link_coding_cap == DP_CAP_ANSI_128B132B) ? 0:1); 250 251 /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or 252 * AUX message. The sequence is slot 1-63 allocated sequence for each 253 * stream. AMD ASIC stream slot allocation should follow the same 254 * sequence. copy DRM MST allocation to dc */ 255 256 get_payload_table(aconnector, proposed_table); 257 258 return true; 259 } 260 261 /* 262 * poll pending down reply 263 */ 264 void dm_helpers_dp_mst_poll_pending_down_reply( 265 struct dc_context *ctx, 266 const struct dc_link *link) 267 {} 268 269 /* 270 * Clear payload allocation table before enable MST DP link. 271 */ 272 void dm_helpers_dp_mst_clear_payload_allocation_table( 273 struct dc_context *ctx, 274 const struct dc_link *link) 275 {} 276 277 /* 278 * Polls for ACT (allocation change trigger) handled and sends 279 * ALLOCATE_PAYLOAD message. 280 */ 281 enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger( 282 struct dc_context *ctx, 283 const struct dc_stream_state *stream) 284 { 285 struct amdgpu_dm_connector *aconnector; 286 struct drm_dp_mst_topology_mgr *mst_mgr; 287 int ret; 288 289 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; 290 291 if (!aconnector || !aconnector->mst_port) 292 return ACT_FAILED; 293 294 mst_mgr = &aconnector->mst_port->mst_mgr; 295 296 if (!mst_mgr->mst_state) 297 return ACT_FAILED; 298 299 ret = drm_dp_check_act_status(mst_mgr); 300 301 if (ret) 302 return ACT_FAILED; 303 304 return ACT_SUCCESS; 305 } 306 307 bool dm_helpers_dp_mst_send_payload_allocation( 308 struct dc_context *ctx, 309 const struct dc_stream_state *stream, 310 bool enable) 311 { 312 struct amdgpu_dm_connector *aconnector; 313 struct drm_dp_mst_topology_mgr *mst_mgr; 314 struct drm_dp_mst_port *mst_port; 315 316 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; 317 318 if (!aconnector || !aconnector->mst_port) 319 return false; 320 321 mst_port = aconnector->port; 322 323 mst_mgr = &aconnector->mst_port->mst_mgr; 324 325 if (!mst_mgr->mst_state) 326 return false; 327 328 /* It's OK for this to fail */ 329 drm_dp_update_payload_part2(mst_mgr); 330 331 if (!enable) 332 drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port); 333 334 return true; 335 } 336 337 void dm_dtn_log_begin(struct dc_context *ctx, 338 struct dc_log_buffer_ctx *log_ctx) 339 { 340 static const char msg[] = "[dtn begin]\n"; 341 342 if (!log_ctx) { 343 pr_info("%s", msg); 344 return; 345 } 346 347 dm_dtn_log_append_v(ctx, log_ctx, "%s", msg); 348 } 349 350 __printf(3, 4) 351 void dm_dtn_log_append_v(struct dc_context *ctx, 352 struct dc_log_buffer_ctx *log_ctx, 353 const char *msg, ...) 354 { 355 va_list args; 356 size_t total; 357 int n; 358 359 if (!log_ctx) { 360 /* No context, redirect to dmesg. */ 361 struct va_format vaf; 362 363 vaf.fmt = msg; 364 vaf.va = &args; 365 366 va_start(args, msg); 367 pr_info("%pV", &vaf); 368 va_end(args); 369 370 return; 371 } 372 373 /* Measure the output. */ 374 va_start(args, msg); 375 n = vsnprintf(NULL, 0, msg, args); 376 va_end(args); 377 378 if (n <= 0) 379 return; 380 381 /* Reallocate the string buffer as needed. */ 382 total = log_ctx->pos + n + 1; 383 384 if (total > log_ctx->size) { 385 char *buf = (char *)kvcalloc(total, sizeof(char), GFP_KERNEL); 386 387 if (buf) { 388 memcpy(buf, log_ctx->buf, log_ctx->pos); 389 kfree(log_ctx->buf); 390 391 log_ctx->buf = buf; 392 log_ctx->size = total; 393 } 394 } 395 396 if (!log_ctx->buf) 397 return; 398 399 /* Write the formatted string to the log buffer. */ 400 va_start(args, msg); 401 n = vscnprintf( 402 log_ctx->buf + log_ctx->pos, 403 log_ctx->size - log_ctx->pos, 404 msg, 405 args); 406 va_end(args); 407 408 if (n > 0) 409 log_ctx->pos += n; 410 } 411 412 void dm_dtn_log_end(struct dc_context *ctx, 413 struct dc_log_buffer_ctx *log_ctx) 414 { 415 static const char msg[] = "[dtn end]\n"; 416 417 if (!log_ctx) { 418 pr_info("%s", msg); 419 return; 420 } 421 422 dm_dtn_log_append_v(ctx, log_ctx, "%s", msg); 423 } 424 425 bool dm_helpers_dp_mst_start_top_mgr( 426 struct dc_context *ctx, 427 const struct dc_link *link, 428 bool boot) 429 { 430 struct amdgpu_dm_connector *aconnector = link->priv; 431 432 if (!aconnector) { 433 DRM_ERROR("Failed to find connector for link!"); 434 return false; 435 } 436 437 if (boot) { 438 DRM_INFO("DM_MST: Differing MST start on aconnector: %p [id: %d]\n", 439 aconnector, aconnector->base.base.id); 440 return true; 441 } 442 443 DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n", 444 aconnector, aconnector->base.base.id); 445 446 return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0); 447 } 448 449 bool dm_helpers_dp_mst_stop_top_mgr( 450 struct dc_context *ctx, 451 struct dc_link *link) 452 { 453 struct amdgpu_dm_connector *aconnector = link->priv; 454 455 if (!aconnector) { 456 DRM_ERROR("Failed to find connector for link!"); 457 return false; 458 } 459 460 DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n", 461 aconnector, aconnector->base.base.id); 462 463 if (aconnector->mst_mgr.mst_state == true) { 464 drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false); 465 link->cur_link_settings.lane_count = 0; 466 } 467 468 return false; 469 } 470 471 bool dm_helpers_dp_read_dpcd( 472 struct dc_context *ctx, 473 const struct dc_link *link, 474 uint32_t address, 475 uint8_t *data, 476 uint32_t size) 477 { 478 479 struct amdgpu_dm_connector *aconnector = link->priv; 480 481 if (!aconnector) { 482 DC_LOG_DC("Failed to find connector for link!\n"); 483 return false; 484 } 485 486 return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address, 487 data, size) > 0; 488 } 489 490 bool dm_helpers_dp_write_dpcd( 491 struct dc_context *ctx, 492 const struct dc_link *link, 493 uint32_t address, 494 const uint8_t *data, 495 uint32_t size) 496 { 497 struct amdgpu_dm_connector *aconnector = link->priv; 498 499 if (!aconnector) { 500 DRM_ERROR("Failed to find connector for link!"); 501 return false; 502 } 503 504 return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux, 505 address, (uint8_t *)data, size) > 0; 506 } 507 508 bool dm_helpers_submit_i2c( 509 struct dc_context *ctx, 510 const struct dc_link *link, 511 struct i2c_command *cmd) 512 { 513 struct amdgpu_dm_connector *aconnector = link->priv; 514 struct i2c_msg *msgs; 515 int i = 0; 516 int num = cmd->number_of_payloads; 517 bool result; 518 519 if (!aconnector) { 520 DRM_ERROR("Failed to find connector for link!"); 521 return false; 522 } 523 524 msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL); 525 526 if (!msgs) 527 return false; 528 529 for (i = 0; i < num; i++) { 530 msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD; 531 msgs[i].addr = cmd->payloads[i].address; 532 msgs[i].len = cmd->payloads[i].length; 533 msgs[i].buf = cmd->payloads[i].data; 534 } 535 536 result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num; 537 538 kfree(msgs); 539 540 return result; 541 } 542 543 #if defined(CONFIG_DRM_AMD_DC_DCN) 544 static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, 545 bool is_write_cmd, 546 unsigned char cmd, 547 unsigned int length, 548 unsigned int offset, 549 unsigned char *data) 550 { 551 bool success = false; 552 unsigned char rc_data[16] = {0}; 553 unsigned char rc_offset[4] = {0}; 554 unsigned char rc_length[2] = {0}; 555 unsigned char rc_cmd = 0; 556 unsigned char rc_result = 0xFF; 557 unsigned char i = 0; 558 uint8_t ret = 0; 559 560 if (is_write_cmd) { 561 // write rc data 562 memmove(rc_data, data, length); 563 ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data)); 564 } 565 566 // write rc offset 567 rc_offset[0] = (unsigned char) offset & 0xFF; 568 rc_offset[1] = (unsigned char) (offset >> 8) & 0xFF; 569 rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF; 570 rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF; 571 ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset)); 572 573 // write rc length 574 rc_length[0] = (unsigned char) length & 0xFF; 575 rc_length[1] = (unsigned char) (length >> 8) & 0xFF; 576 ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length)); 577 578 // write rc cmd 579 rc_cmd = cmd | 0x80; 580 ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd)); 581 582 if (ret < 0) { 583 DRM_ERROR(" execute_synaptics_rc_command - write cmd ..., err = %d\n", ret); 584 return false; 585 } 586 587 // poll until active is 0 588 for (i = 0; i < 10; i++) { 589 drm_dp_dpcd_read(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd)); 590 if (rc_cmd == cmd) 591 // active is 0 592 break; 593 msleep(10); 594 } 595 596 // read rc result 597 drm_dp_dpcd_read(aux, SYNAPTICS_RC_RESULT, &rc_result, sizeof(rc_result)); 598 success = (rc_result == 0); 599 600 if (success && !is_write_cmd) { 601 // read rc data 602 drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length); 603 } 604 605 DC_LOG_DC(" execute_synaptics_rc_command - success = %d\n", success); 606 607 return success; 608 } 609 610 static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) 611 { 612 unsigned char data[16] = {0}; 613 614 DC_LOG_DC("Start apply_synaptics_fifo_reset_wa\n"); 615 616 // Step 2 617 data[0] = 'P'; 618 data[1] = 'R'; 619 data[2] = 'I'; 620 data[3] = 'U'; 621 data[4] = 'S'; 622 623 if (!execute_synaptics_rc_command(aux, true, 0x01, 5, 0, data)) 624 return; 625 626 // Step 3 and 4 627 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data)) 628 return; 629 630 data[0] &= (~(1 << 1)); // set bit 1 to 0 631 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data)) 632 return; 633 634 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data)) 635 return; 636 637 data[0] &= (~(1 << 1)); // set bit 1 to 0 638 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220D98, data)) 639 return; 640 641 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data)) 642 return; 643 644 data[0] &= (~(1 << 1)); // set bit 1 to 0 645 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data)) 646 return; 647 648 // Step 3 and 5 649 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data)) 650 return; 651 652 data[0] |= (1 << 1); // set bit 1 to 1 653 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data)) 654 return; 655 656 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data)) 657 return; 658 659 data[0] |= (1 << 1); // set bit 1 to 1 660 return; 661 662 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data)) 663 return; 664 665 data[0] |= (1 << 1); // set bit 1 to 1 666 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data)) 667 return; 668 669 // Step 6 670 if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL)) 671 return; 672 673 DC_LOG_DC("Done apply_synaptics_fifo_reset_wa\n"); 674 } 675 676 static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst( 677 struct drm_dp_aux *aux, 678 const struct dc_stream_state *stream, 679 bool enable) 680 { 681 uint8_t ret = 0; 682 683 DC_LOG_DC("Configure DSC to non-virtual dpcd synaptics\n"); 684 685 if (enable) { 686 /* When DSC is enabled on previous boot and reboot with the hub, 687 * there is a chance that Synaptics hub gets stuck during reboot sequence. 688 * Applying a workaround to reset Synaptics SDP fifo before enabling the first stream 689 */ 690 if (!stream->link->link_status.link_active && 691 memcmp(stream->link->dpcd_caps.branch_dev_name, 692 (int8_t *)SYNAPTICS_DEVICE_ID, 4) == 0) 693 apply_synaptics_fifo_reset_wa(aux); 694 695 ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1); 696 DRM_INFO("Send DSC enable to synaptics\n"); 697 698 } else { 699 /* Synaptics hub not support virtual dpcd, 700 * external monitor occur garbage while disable DSC, 701 * Disable DSC only when entire link status turn to false, 702 */ 703 if (!stream->link->link_status.link_active) { 704 ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1); 705 DRM_INFO("Send DSC disable to synaptics\n"); 706 } 707 } 708 709 return ret; 710 } 711 #endif 712 713 bool dm_helpers_dp_write_dsc_enable( 714 struct dc_context *ctx, 715 const struct dc_stream_state *stream, 716 bool enable) 717 { 718 uint8_t enable_dsc = enable ? 1 : 0; 719 struct amdgpu_dm_connector *aconnector; 720 uint8_t ret = 0; 721 722 if (!stream) 723 return false; 724 725 if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { 726 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; 727 728 if (!aconnector->dsc_aux) 729 return false; 730 731 #if defined(CONFIG_DRM_AMD_DC_DCN) 732 // apply w/a to synaptics 733 if (needs_dsc_aux_workaround(aconnector->dc_link) && 734 (aconnector->mst_downstream_port_present.byte & 0x7) != 0x3) 735 return write_dsc_enable_synaptics_non_virtual_dpcd_mst( 736 aconnector->dsc_aux, stream, enable_dsc); 737 #endif 738 739 ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1); 740 DC_LOG_DC("Send DSC %s to MST RX\n", enable_dsc ? "enable" : "disable"); 741 } 742 743 if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) { 744 #if defined(CONFIG_DRM_AMD_DC_DCN) 745 if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { 746 #endif 747 ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); 748 DC_LOG_DC("Send DSC %s to SST RX\n", enable_dsc ? "enable" : "disable"); 749 #if defined(CONFIG_DRM_AMD_DC_DCN) 750 } else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { 751 ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); 752 DC_LOG_DC("Send DSC %s to DP-HDMI PCON\n", enable_dsc ? "enable" : "disable"); 753 } 754 #endif 755 } 756 757 return (ret > 0); 758 } 759 760 bool dm_helpers_is_dp_sink_present(struct dc_link *link) 761 { 762 bool dp_sink_present; 763 struct amdgpu_dm_connector *aconnector = link->priv; 764 765 if (!aconnector) { 766 BUG_ON("Failed to find connector for link!"); 767 return true; 768 } 769 770 mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex); 771 dp_sink_present = dc_link_is_dp_sink_present(link); 772 mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex); 773 return dp_sink_present; 774 } 775 776 enum dc_edid_status dm_helpers_read_local_edid( 777 struct dc_context *ctx, 778 struct dc_link *link, 779 struct dc_sink *sink) 780 { 781 struct amdgpu_dm_connector *aconnector = link->priv; 782 struct drm_connector *connector = &aconnector->base; 783 struct i2c_adapter *ddc; 784 int retry = 3; 785 enum dc_edid_status edid_status; 786 struct edid *edid; 787 788 if (link->aux_mode) 789 ddc = &aconnector->dm_dp_aux.aux.ddc; 790 else 791 ddc = &aconnector->i2c->base; 792 793 /* some dongles read edid incorrectly the first time, 794 * do check sum and retry to make sure read correct edid. 795 */ 796 do { 797 798 edid = drm_get_edid(&aconnector->base, ddc); 799 800 /* DP Compliance Test 4.2.2.6 */ 801 if (link->aux_mode && connector->edid_corrupt) 802 drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum); 803 804 if (!edid && connector->edid_corrupt) { 805 connector->edid_corrupt = false; 806 return EDID_BAD_CHECKSUM; 807 } 808 809 if (!edid) 810 return EDID_NO_RESPONSE; 811 812 sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1); 813 memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length); 814 815 /* We don't need the original edid anymore */ 816 kfree(edid); 817 818 edid_status = dm_helpers_parse_edid_caps( 819 link, 820 &sink->dc_edid, 821 &sink->edid_caps); 822 823 } while (edid_status == EDID_BAD_CHECKSUM && --retry > 0); 824 825 if (edid_status != EDID_OK) 826 DRM_ERROR("EDID err: %d, on connector: %s", 827 edid_status, 828 aconnector->base.name); 829 830 /* DP Compliance Test 4.2.2.3 */ 831 if (link->aux_mode) 832 drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, sink->dc_edid.raw_edid[sink->dc_edid.length-1]); 833 834 return edid_status; 835 } 836 int dm_helper_dmub_aux_transfer_sync( 837 struct dc_context *ctx, 838 const struct dc_link *link, 839 struct aux_payload *payload, 840 enum aux_return_code_type *operation_result) 841 { 842 return amdgpu_dm_process_dmub_aux_transfer_sync(true, ctx, 843 link->link_index, (void *)payload, 844 (void *)operation_result); 845 } 846 847 int dm_helpers_dmub_set_config_sync(struct dc_context *ctx, 848 const struct dc_link *link, 849 struct set_config_cmd_payload *payload, 850 enum set_config_status *operation_result) 851 { 852 return amdgpu_dm_process_dmub_aux_transfer_sync(false, ctx, 853 link->link_index, (void *)payload, 854 (void *)operation_result); 855 } 856 857 void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks) 858 { 859 /* TODO: something */ 860 } 861 862 void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us) 863 { 864 // TODO: 865 //amdgpu_device_gpu_recover(dc_context->driver-context, NULL); 866 } 867 868 void *dm_helpers_allocate_gpu_mem( 869 struct dc_context *ctx, 870 enum dc_gpu_mem_alloc_type type, 871 size_t size, 872 long long *addr) 873 { 874 struct amdgpu_device *adev = ctx->driver_context; 875 struct dal_allocation *da; 876 u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ? 877 AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM; 878 int ret; 879 880 da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL); 881 if (!da) 882 return NULL; 883 884 ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, 885 domain, &da->bo, 886 &da->gpu_addr, &da->cpu_ptr); 887 888 *addr = da->gpu_addr; 889 890 if (ret) { 891 kfree(da); 892 return NULL; 893 } 894 895 /* add da to list in dm */ 896 list_add(&da->list, &adev->dm.da_list); 897 898 return da->cpu_ptr; 899 } 900 901 void dm_helpers_free_gpu_mem( 902 struct dc_context *ctx, 903 enum dc_gpu_mem_alloc_type type, 904 void *pvMem) 905 { 906 struct amdgpu_device *adev = ctx->driver_context; 907 struct dal_allocation *da; 908 909 /* walk the da list in DM */ 910 list_for_each_entry(da, &adev->dm.da_list, list) { 911 if (pvMem == da->cpu_ptr) { 912 amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); 913 list_del(&da->list); 914 kfree(da); 915 break; 916 } 917 } 918 } 919 920 bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable) 921 { 922 enum dc_irq_source irq_source; 923 bool ret; 924 925 irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX; 926 927 ret = dc_interrupt_set(ctx->dc, irq_source, enable); 928 929 DRM_DEBUG_DRIVER("Dmub trace irq %sabling: r=%d\n", 930 enable ? "en" : "dis", ret); 931 return ret; 932 } 933 934 void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream) 935 { 936 /* TODO: virtual DPCD */ 937 struct dc_link *link = stream->link; 938 union down_spread_ctrl old_downspread; 939 union down_spread_ctrl new_downspread; 940 941 if (link->aux_access_disabled) 942 return; 943 944 if (!dm_helpers_dp_read_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL, 945 &old_downspread.raw, 946 sizeof(old_downspread))) 947 return; 948 949 new_downspread.raw = old_downspread.raw; 950 new_downspread.bits.IGNORE_MSA_TIMING_PARAM = 951 (stream->ignore_msa_timing_param) ? 1 : 0; 952 953 if (new_downspread.raw != old_downspread.raw) 954 dm_helpers_dp_write_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL, 955 &new_downspread.raw, 956 sizeof(new_downspread)); 957 } 958 959 void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz) 960 { 961 // TODO 962 } 963 964 void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable) 965 { 966 /* TODO: add periodic detection implementation */ 967 } 968