1 /* 2 * Copyright 2015 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: AMD 23 * 24 */ 25 26 #include <linux/string.h> 27 #include <linux/acpi.h> 28 #include <linux/i2c.h> 29 30 #include <drm/drm_probe_helper.h> 31 #include <drm/amdgpu_drm.h> 32 #include <drm/drm_edid.h> 33 34 #include "dm_services.h" 35 #include "amdgpu.h" 36 #include "dc.h" 37 #include "amdgpu_dm.h" 38 #include "amdgpu_dm_irq.h" 39 #include "amdgpu_dm_mst_types.h" 40 41 #include "dm_helpers.h" 42 #include "ddc_service_types.h" 43 44 struct monitor_patch_info { 45 unsigned int manufacturer_id; 46 unsigned int product_id; 47 void (*patch_func)(struct dc_edid_caps *edid_caps, unsigned int param); 48 unsigned int patch_param; 49 }; 50 static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param); 51 52 static const struct monitor_patch_info monitor_patch_table[] = { 53 {0x6D1E, 0x5BBF, set_max_dsc_bpp_limit, 15}, 54 {0x6D1E, 0x5B9A, set_max_dsc_bpp_limit, 15}, 55 }; 56 57 static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param) 58 { 59 if (edid_caps) 60 edid_caps->panel_patch.max_dsc_target_bpp_limit = param; 61 } 62 63 static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps) 64 { 65 int i, ret = 0; 66 67 for (i = 0; i < ARRAY_SIZE(monitor_patch_table); i++) 68 if ((edid_caps->manufacturer_id == monitor_patch_table[i].manufacturer_id) 69 && (edid_caps->product_id == monitor_patch_table[i].product_id)) { 70 monitor_patch_table[i].patch_func(edid_caps, monitor_patch_table[i].patch_param); 71 ret++; 72 } 73 74 return ret; 75 } 76 77 /* dm_helpers_parse_edid_caps 78 * 79 * Parse edid caps 80 * 81 * @edid: [in] pointer to edid 82 * edid_caps: [in] pointer to edid caps 83 * @return 84 * void 85 * */ 86 enum dc_edid_status dm_helpers_parse_edid_caps( 87 struct dc_link *link, 88 const struct dc_edid *edid, 89 struct dc_edid_caps *edid_caps) 90 { 91 struct amdgpu_dm_connector *aconnector = link->priv; 92 struct drm_connector *connector = &aconnector->base; 93 struct edid *edid_buf = edid ? (struct edid *) edid->raw_edid : NULL; 94 struct cea_sad *sads; 95 int sad_count = -1; 96 int sadb_count = -1; 97 int i = 0; 98 uint8_t *sadb = NULL; 99 100 enum dc_edid_status result = EDID_OK; 101 102 if (!edid_caps || !edid) 103 return EDID_BAD_INPUT; 104 105 if (!drm_edid_is_valid(edid_buf)) 106 result = EDID_BAD_CHECKSUM; 107 108 edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] | 109 ((uint16_t) edid_buf->mfg_id[1])<<8; 110 edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] | 111 ((uint16_t) edid_buf->prod_code[1])<<8; 112 edid_caps->serial_number = edid_buf->serial; 113 edid_caps->manufacture_week = edid_buf->mfg_week; 114 edid_caps->manufacture_year = edid_buf->mfg_year; 115 116 drm_edid_get_monitor_name(edid_buf, 117 edid_caps->display_name, 118 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS); 119 120 edid_caps->edid_hdmi = connector->display_info.is_hdmi; 121 122 sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads); 123 if (sad_count <= 0) 124 return result; 125 126 edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT; 127 for (i = 0; i < edid_caps->audio_mode_count; ++i) { 128 struct cea_sad *sad = &sads[i]; 129 130 edid_caps->audio_modes[i].format_code = sad->format; 131 edid_caps->audio_modes[i].channel_count = sad->channels + 1; 132 edid_caps->audio_modes[i].sample_rate = sad->freq; 133 edid_caps->audio_modes[i].sample_size = sad->byte2; 134 } 135 136 sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb); 137 138 if (sadb_count < 0) { 139 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count); 140 sadb_count = 0; 141 } 142 143 if (sadb_count) 144 edid_caps->speaker_flags = sadb[0]; 145 else 146 edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION; 147 148 kfree(sads); 149 kfree(sadb); 150 151 amdgpu_dm_patch_edid_caps(edid_caps); 152 153 return result; 154 } 155 156 static void get_payload_table( 157 struct amdgpu_dm_connector *aconnector, 158 struct dp_mst_stream_allocation_table *proposed_table) 159 { 160 int i; 161 struct drm_dp_mst_topology_mgr *mst_mgr = 162 &aconnector->mst_port->mst_mgr; 163 164 mutex_lock(&mst_mgr->payload_lock); 165 166 proposed_table->stream_count = 0; 167 168 /* number of active streams */ 169 for (i = 0; i < mst_mgr->max_payloads; i++) { 170 if (mst_mgr->payloads[i].num_slots == 0) 171 break; /* end of vcp_id table */ 172 173 ASSERT(mst_mgr->payloads[i].payload_state != 174 DP_PAYLOAD_DELETE_LOCAL); 175 176 if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL || 177 mst_mgr->payloads[i].payload_state == 178 DP_PAYLOAD_REMOTE) { 179 180 struct dp_mst_stream_allocation *sa = 181 &proposed_table->stream_allocations[ 182 proposed_table->stream_count]; 183 184 sa->slot_count = mst_mgr->payloads[i].num_slots; 185 sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi; 186 proposed_table->stream_count++; 187 } 188 } 189 190 mutex_unlock(&mst_mgr->payload_lock); 191 } 192 193 void dm_helpers_dp_update_branch_info( 194 struct dc_context *ctx, 195 const struct dc_link *link) 196 {} 197 198 /* 199 * Writes payload allocation table in immediate downstream device. 200 */ 201 bool dm_helpers_dp_mst_write_payload_allocation_table( 202 struct dc_context *ctx, 203 const struct dc_stream_state *stream, 204 struct dp_mst_stream_allocation_table *proposed_table, 205 bool enable) 206 { 207 struct amdgpu_dm_connector *aconnector; 208 struct dm_connector_state *dm_conn_state; 209 struct drm_dp_mst_topology_mgr *mst_mgr; 210 struct drm_dp_mst_port *mst_port; 211 bool ret; 212 u8 link_coding_cap = DP_8b_10b_ENCODING; 213 214 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; 215 /* Accessing the connector state is required for vcpi_slots allocation 216 * and directly relies on behaviour in commit check 217 * that blocks before commit guaranteeing that the state 218 * is not gonna be swapped while still in use in commit tail */ 219 220 if (!aconnector || !aconnector->mst_port) 221 return false; 222 223 dm_conn_state = to_dm_connector_state(aconnector->base.state); 224 225 mst_mgr = &aconnector->mst_port->mst_mgr; 226 227 if (!mst_mgr->mst_state) 228 return false; 229 230 mst_port = aconnector->port; 231 232 #if defined(CONFIG_DRM_AMD_DC_DCN) 233 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link); 234 #endif 235 236 if (enable) { 237 238 ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, 239 dm_conn_state->pbn, 240 dm_conn_state->vcpi_slots); 241 if (!ret) 242 return false; 243 244 } else { 245 drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port); 246 } 247 248 /* It's OK for this to fail */ 249 drm_dp_update_payload_part1(mst_mgr, (link_coding_cap == DP_CAP_ANSI_128B132B) ? 0:1); 250 251 /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or 252 * AUX message. The sequence is slot 1-63 allocated sequence for each 253 * stream. AMD ASIC stream slot allocation should follow the same 254 * sequence. copy DRM MST allocation to dc */ 255 256 get_payload_table(aconnector, proposed_table); 257 258 return true; 259 } 260 261 /* 262 * poll pending down reply 263 */ 264 void dm_helpers_dp_mst_poll_pending_down_reply( 265 struct dc_context *ctx, 266 const struct dc_link *link) 267 {} 268 269 /* 270 * Clear payload allocation table before enable MST DP link. 271 */ 272 void dm_helpers_dp_mst_clear_payload_allocation_table( 273 struct dc_context *ctx, 274 const struct dc_link *link) 275 {} 276 277 /* 278 * Polls for ACT (allocation change trigger) handled and sends 279 * ALLOCATE_PAYLOAD message. 280 */ 281 enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger( 282 struct dc_context *ctx, 283 const struct dc_stream_state *stream) 284 { 285 struct amdgpu_dm_connector *aconnector; 286 struct drm_dp_mst_topology_mgr *mst_mgr; 287 int ret; 288 289 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; 290 291 if (!aconnector || !aconnector->mst_port) 292 return ACT_FAILED; 293 294 mst_mgr = &aconnector->mst_port->mst_mgr; 295 296 if (!mst_mgr->mst_state) 297 return ACT_FAILED; 298 299 ret = drm_dp_check_act_status(mst_mgr); 300 301 if (ret) 302 return ACT_FAILED; 303 304 return ACT_SUCCESS; 305 } 306 307 bool dm_helpers_dp_mst_send_payload_allocation( 308 struct dc_context *ctx, 309 const struct dc_stream_state *stream, 310 bool enable) 311 { 312 struct amdgpu_dm_connector *aconnector; 313 struct drm_dp_mst_topology_mgr *mst_mgr; 314 struct drm_dp_mst_port *mst_port; 315 enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD; 316 enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD; 317 318 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; 319 320 if (!aconnector || !aconnector->mst_port) 321 return false; 322 323 mst_port = aconnector->port; 324 325 mst_mgr = &aconnector->mst_port->mst_mgr; 326 327 if (!mst_mgr->mst_state) 328 return false; 329 330 if (!enable) { 331 set_flag = MST_CLEAR_ALLOCATED_PAYLOAD; 332 clr_flag = MST_ALLOCATE_NEW_PAYLOAD; 333 } 334 335 if (drm_dp_update_payload_part2(mst_mgr)) { 336 amdgpu_dm_set_mst_status(&aconnector->mst_status, 337 set_flag, false); 338 } else { 339 amdgpu_dm_set_mst_status(&aconnector->mst_status, 340 set_flag, true); 341 amdgpu_dm_set_mst_status(&aconnector->mst_status, 342 clr_flag, false); 343 } 344 345 if (!enable) 346 drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port); 347 348 return true; 349 } 350 351 void dm_dtn_log_begin(struct dc_context *ctx, 352 struct dc_log_buffer_ctx *log_ctx) 353 { 354 static const char msg[] = "[dtn begin]\n"; 355 356 if (!log_ctx) { 357 pr_info("%s", msg); 358 return; 359 } 360 361 dm_dtn_log_append_v(ctx, log_ctx, "%s", msg); 362 } 363 364 __printf(3, 4) 365 void dm_dtn_log_append_v(struct dc_context *ctx, 366 struct dc_log_buffer_ctx *log_ctx, 367 const char *msg, ...) 368 { 369 va_list args; 370 size_t total; 371 int n; 372 373 if (!log_ctx) { 374 /* No context, redirect to dmesg. */ 375 struct va_format vaf; 376 377 vaf.fmt = msg; 378 vaf.va = &args; 379 380 va_start(args, msg); 381 pr_info("%pV", &vaf); 382 va_end(args); 383 384 return; 385 } 386 387 /* Measure the output. */ 388 va_start(args, msg); 389 n = vsnprintf(NULL, 0, msg, args); 390 va_end(args); 391 392 if (n <= 0) 393 return; 394 395 /* Reallocate the string buffer as needed. */ 396 total = log_ctx->pos + n + 1; 397 398 if (total > log_ctx->size) { 399 char *buf = (char *)kvcalloc(total, sizeof(char), GFP_KERNEL); 400 401 if (buf) { 402 memcpy(buf, log_ctx->buf, log_ctx->pos); 403 kfree(log_ctx->buf); 404 405 log_ctx->buf = buf; 406 log_ctx->size = total; 407 } 408 } 409 410 if (!log_ctx->buf) 411 return; 412 413 /* Write the formatted string to the log buffer. */ 414 va_start(args, msg); 415 n = vscnprintf( 416 log_ctx->buf + log_ctx->pos, 417 log_ctx->size - log_ctx->pos, 418 msg, 419 args); 420 va_end(args); 421 422 if (n > 0) 423 log_ctx->pos += n; 424 } 425 426 void dm_dtn_log_end(struct dc_context *ctx, 427 struct dc_log_buffer_ctx *log_ctx) 428 { 429 static const char msg[] = "[dtn end]\n"; 430 431 if (!log_ctx) { 432 pr_info("%s", msg); 433 return; 434 } 435 436 dm_dtn_log_append_v(ctx, log_ctx, "%s", msg); 437 } 438 439 bool dm_helpers_dp_mst_start_top_mgr( 440 struct dc_context *ctx, 441 const struct dc_link *link, 442 bool boot) 443 { 444 struct amdgpu_dm_connector *aconnector = link->priv; 445 446 if (!aconnector) { 447 DRM_ERROR("Failed to find connector for link!"); 448 return false; 449 } 450 451 if (boot) { 452 DRM_INFO("DM_MST: Differing MST start on aconnector: %p [id: %d]\n", 453 aconnector, aconnector->base.base.id); 454 return true; 455 } 456 457 DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n", 458 aconnector, aconnector->base.base.id); 459 460 return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0); 461 } 462 463 bool dm_helpers_dp_mst_stop_top_mgr( 464 struct dc_context *ctx, 465 struct dc_link *link) 466 { 467 struct amdgpu_dm_connector *aconnector = link->priv; 468 469 if (!aconnector) { 470 DRM_ERROR("Failed to find connector for link!"); 471 return false; 472 } 473 474 DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n", 475 aconnector, aconnector->base.base.id); 476 477 if (aconnector->mst_mgr.mst_state == true) { 478 drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false); 479 link->cur_link_settings.lane_count = 0; 480 } 481 482 return false; 483 } 484 485 bool dm_helpers_dp_read_dpcd( 486 struct dc_context *ctx, 487 const struct dc_link *link, 488 uint32_t address, 489 uint8_t *data, 490 uint32_t size) 491 { 492 493 struct amdgpu_dm_connector *aconnector = link->priv; 494 495 if (!aconnector) { 496 DC_LOG_DC("Failed to find connector for link!\n"); 497 return false; 498 } 499 500 return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address, 501 data, size) > 0; 502 } 503 504 bool dm_helpers_dp_write_dpcd( 505 struct dc_context *ctx, 506 const struct dc_link *link, 507 uint32_t address, 508 const uint8_t *data, 509 uint32_t size) 510 { 511 struct amdgpu_dm_connector *aconnector = link->priv; 512 513 if (!aconnector) { 514 DRM_ERROR("Failed to find connector for link!"); 515 return false; 516 } 517 518 return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux, 519 address, (uint8_t *)data, size) > 0; 520 } 521 522 bool dm_helpers_submit_i2c( 523 struct dc_context *ctx, 524 const struct dc_link *link, 525 struct i2c_command *cmd) 526 { 527 struct amdgpu_dm_connector *aconnector = link->priv; 528 struct i2c_msg *msgs; 529 int i = 0; 530 int num = cmd->number_of_payloads; 531 bool result; 532 533 if (!aconnector) { 534 DRM_ERROR("Failed to find connector for link!"); 535 return false; 536 } 537 538 msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL); 539 540 if (!msgs) 541 return false; 542 543 for (i = 0; i < num; i++) { 544 msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD; 545 msgs[i].addr = cmd->payloads[i].address; 546 msgs[i].len = cmd->payloads[i].length; 547 msgs[i].buf = cmd->payloads[i].data; 548 } 549 550 result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num; 551 552 kfree(msgs); 553 554 return result; 555 } 556 557 #if defined(CONFIG_DRM_AMD_DC_DCN) 558 static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, 559 bool is_write_cmd, 560 unsigned char cmd, 561 unsigned int length, 562 unsigned int offset, 563 unsigned char *data) 564 { 565 bool success = false; 566 unsigned char rc_data[16] = {0}; 567 unsigned char rc_offset[4] = {0}; 568 unsigned char rc_length[2] = {0}; 569 unsigned char rc_cmd = 0; 570 unsigned char rc_result = 0xFF; 571 unsigned char i = 0; 572 int ret; 573 574 if (is_write_cmd) { 575 // write rc data 576 memmove(rc_data, data, length); 577 ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data)); 578 } 579 580 // write rc offset 581 rc_offset[0] = (unsigned char) offset & 0xFF; 582 rc_offset[1] = (unsigned char) (offset >> 8) & 0xFF; 583 rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF; 584 rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF; 585 ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset)); 586 587 // write rc length 588 rc_length[0] = (unsigned char) length & 0xFF; 589 rc_length[1] = (unsigned char) (length >> 8) & 0xFF; 590 ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length)); 591 592 // write rc cmd 593 rc_cmd = cmd | 0x80; 594 ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd)); 595 596 if (ret < 0) { 597 DRM_ERROR(" execute_synaptics_rc_command - write cmd ..., err = %d\n", ret); 598 return false; 599 } 600 601 // poll until active is 0 602 for (i = 0; i < 10; i++) { 603 drm_dp_dpcd_read(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd)); 604 if (rc_cmd == cmd) 605 // active is 0 606 break; 607 msleep(10); 608 } 609 610 // read rc result 611 drm_dp_dpcd_read(aux, SYNAPTICS_RC_RESULT, &rc_result, sizeof(rc_result)); 612 success = (rc_result == 0); 613 614 if (success && !is_write_cmd) { 615 // read rc data 616 drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length); 617 } 618 619 DC_LOG_DC(" execute_synaptics_rc_command - success = %d\n", success); 620 621 return success; 622 } 623 624 static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) 625 { 626 unsigned char data[16] = {0}; 627 628 DC_LOG_DC("Start apply_synaptics_fifo_reset_wa\n"); 629 630 // Step 2 631 data[0] = 'P'; 632 data[1] = 'R'; 633 data[2] = 'I'; 634 data[3] = 'U'; 635 data[4] = 'S'; 636 637 if (!execute_synaptics_rc_command(aux, true, 0x01, 5, 0, data)) 638 return; 639 640 // Step 3 and 4 641 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data)) 642 return; 643 644 data[0] &= (~(1 << 1)); // set bit 1 to 0 645 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data)) 646 return; 647 648 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data)) 649 return; 650 651 data[0] &= (~(1 << 1)); // set bit 1 to 0 652 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220D98, data)) 653 return; 654 655 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data)) 656 return; 657 658 data[0] &= (~(1 << 1)); // set bit 1 to 0 659 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data)) 660 return; 661 662 // Step 3 and 5 663 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data)) 664 return; 665 666 data[0] |= (1 << 1); // set bit 1 to 1 667 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data)) 668 return; 669 670 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data)) 671 return; 672 673 data[0] |= (1 << 1); // set bit 1 to 1 674 return; 675 676 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data)) 677 return; 678 679 data[0] |= (1 << 1); // set bit 1 to 1 680 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data)) 681 return; 682 683 // Step 6 684 if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL)) 685 return; 686 687 DC_LOG_DC("Done apply_synaptics_fifo_reset_wa\n"); 688 } 689 690 static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst( 691 struct drm_dp_aux *aux, 692 const struct dc_stream_state *stream, 693 bool enable) 694 { 695 uint8_t ret = 0; 696 697 DC_LOG_DC("Configure DSC to non-virtual dpcd synaptics\n"); 698 699 if (enable) { 700 /* When DSC is enabled on previous boot and reboot with the hub, 701 * there is a chance that Synaptics hub gets stuck during reboot sequence. 702 * Applying a workaround to reset Synaptics SDP fifo before enabling the first stream 703 */ 704 if (!stream->link->link_status.link_active && 705 memcmp(stream->link->dpcd_caps.branch_dev_name, 706 (int8_t *)SYNAPTICS_DEVICE_ID, 4) == 0) 707 apply_synaptics_fifo_reset_wa(aux); 708 709 ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1); 710 DRM_INFO("Send DSC enable to synaptics\n"); 711 712 } else { 713 /* Synaptics hub not support virtual dpcd, 714 * external monitor occur garbage while disable DSC, 715 * Disable DSC only when entire link status turn to false, 716 */ 717 if (!stream->link->link_status.link_active) { 718 ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1); 719 DRM_INFO("Send DSC disable to synaptics\n"); 720 } 721 } 722 723 return ret; 724 } 725 #endif 726 727 bool dm_helpers_dp_write_dsc_enable( 728 struct dc_context *ctx, 729 const struct dc_stream_state *stream, 730 bool enable) 731 { 732 uint8_t enable_dsc = enable ? 1 : 0; 733 struct amdgpu_dm_connector *aconnector; 734 uint8_t ret = 0; 735 736 if (!stream) 737 return false; 738 739 if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { 740 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; 741 742 if (!aconnector->dsc_aux) 743 return false; 744 745 #if defined(CONFIG_DRM_AMD_DC_DCN) 746 // apply w/a to synaptics 747 if (needs_dsc_aux_workaround(aconnector->dc_link) && 748 (aconnector->mst_downstream_port_present.byte & 0x7) != 0x3) 749 return write_dsc_enable_synaptics_non_virtual_dpcd_mst( 750 aconnector->dsc_aux, stream, enable_dsc); 751 #endif 752 753 ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1); 754 DC_LOG_DC("Send DSC %s to MST RX\n", enable_dsc ? "enable" : "disable"); 755 } 756 757 if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) { 758 #if defined(CONFIG_DRM_AMD_DC_DCN) 759 if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { 760 #endif 761 ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); 762 DC_LOG_DC("Send DSC %s to SST RX\n", enable_dsc ? "enable" : "disable"); 763 #if defined(CONFIG_DRM_AMD_DC_DCN) 764 } else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { 765 ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); 766 DC_LOG_DC("Send DSC %s to DP-HDMI PCON\n", enable_dsc ? "enable" : "disable"); 767 } 768 #endif 769 } 770 771 return (ret > 0); 772 } 773 774 bool dm_helpers_is_dp_sink_present(struct dc_link *link) 775 { 776 bool dp_sink_present; 777 struct amdgpu_dm_connector *aconnector = link->priv; 778 779 if (!aconnector) { 780 BUG_ON("Failed to find connector for link!"); 781 return true; 782 } 783 784 mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex); 785 dp_sink_present = dc_link_is_dp_sink_present(link); 786 mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex); 787 return dp_sink_present; 788 } 789 790 enum dc_edid_status dm_helpers_read_local_edid( 791 struct dc_context *ctx, 792 struct dc_link *link, 793 struct dc_sink *sink) 794 { 795 struct amdgpu_dm_connector *aconnector = link->priv; 796 struct drm_connector *connector = &aconnector->base; 797 struct i2c_adapter *ddc; 798 int retry = 3; 799 enum dc_edid_status edid_status; 800 struct edid *edid; 801 802 if (link->aux_mode) 803 ddc = &aconnector->dm_dp_aux.aux.ddc; 804 else 805 ddc = &aconnector->i2c->base; 806 807 /* some dongles read edid incorrectly the first time, 808 * do check sum and retry to make sure read correct edid. 809 */ 810 do { 811 812 edid = drm_get_edid(&aconnector->base, ddc); 813 814 /* DP Compliance Test 4.2.2.6 */ 815 if (link->aux_mode && connector->edid_corrupt) 816 drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum); 817 818 if (!edid && connector->edid_corrupt) { 819 connector->edid_corrupt = false; 820 return EDID_BAD_CHECKSUM; 821 } 822 823 if (!edid) 824 return EDID_NO_RESPONSE; 825 826 sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1); 827 memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length); 828 829 /* We don't need the original edid anymore */ 830 kfree(edid); 831 832 edid_status = dm_helpers_parse_edid_caps( 833 link, 834 &sink->dc_edid, 835 &sink->edid_caps); 836 837 } while (edid_status == EDID_BAD_CHECKSUM && --retry > 0); 838 839 if (edid_status != EDID_OK) 840 DRM_ERROR("EDID err: %d, on connector: %s", 841 edid_status, 842 aconnector->base.name); 843 844 /* DP Compliance Test 4.2.2.3 */ 845 if (link->aux_mode) 846 drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, sink->dc_edid.raw_edid[sink->dc_edid.length-1]); 847 848 return edid_status; 849 } 850 int dm_helper_dmub_aux_transfer_sync( 851 struct dc_context *ctx, 852 const struct dc_link *link, 853 struct aux_payload *payload, 854 enum aux_return_code_type *operation_result) 855 { 856 return amdgpu_dm_process_dmub_aux_transfer_sync(true, ctx, 857 link->link_index, (void *)payload, 858 (void *)operation_result); 859 } 860 861 int dm_helpers_dmub_set_config_sync(struct dc_context *ctx, 862 const struct dc_link *link, 863 struct set_config_cmd_payload *payload, 864 enum set_config_status *operation_result) 865 { 866 return amdgpu_dm_process_dmub_aux_transfer_sync(false, ctx, 867 link->link_index, (void *)payload, 868 (void *)operation_result); 869 } 870 871 void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks) 872 { 873 /* TODO: something */ 874 } 875 876 void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us) 877 { 878 // TODO: 879 //amdgpu_device_gpu_recover(dc_context->driver-context, NULL); 880 } 881 882 void *dm_helpers_allocate_gpu_mem( 883 struct dc_context *ctx, 884 enum dc_gpu_mem_alloc_type type, 885 size_t size, 886 long long *addr) 887 { 888 struct amdgpu_device *adev = ctx->driver_context; 889 struct dal_allocation *da; 890 u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ? 891 AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM; 892 int ret; 893 894 da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL); 895 if (!da) 896 return NULL; 897 898 ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, 899 domain, &da->bo, 900 &da->gpu_addr, &da->cpu_ptr); 901 902 *addr = da->gpu_addr; 903 904 if (ret) { 905 kfree(da); 906 return NULL; 907 } 908 909 /* add da to list in dm */ 910 list_add(&da->list, &adev->dm.da_list); 911 912 return da->cpu_ptr; 913 } 914 915 void dm_helpers_free_gpu_mem( 916 struct dc_context *ctx, 917 enum dc_gpu_mem_alloc_type type, 918 void *pvMem) 919 { 920 struct amdgpu_device *adev = ctx->driver_context; 921 struct dal_allocation *da; 922 923 /* walk the da list in DM */ 924 list_for_each_entry(da, &adev->dm.da_list, list) { 925 if (pvMem == da->cpu_ptr) { 926 amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); 927 list_del(&da->list); 928 kfree(da); 929 break; 930 } 931 } 932 } 933 934 bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable) 935 { 936 enum dc_irq_source irq_source; 937 bool ret; 938 939 irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX; 940 941 ret = dc_interrupt_set(ctx->dc, irq_source, enable); 942 943 DRM_DEBUG_DRIVER("Dmub trace irq %sabling: r=%d\n", 944 enable ? "en" : "dis", ret); 945 return ret; 946 } 947 948 void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream) 949 { 950 /* TODO: virtual DPCD */ 951 struct dc_link *link = stream->link; 952 union down_spread_ctrl old_downspread; 953 union down_spread_ctrl new_downspread; 954 955 if (link->aux_access_disabled) 956 return; 957 958 if (!dm_helpers_dp_read_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL, 959 &old_downspread.raw, 960 sizeof(old_downspread))) 961 return; 962 963 new_downspread.raw = old_downspread.raw; 964 new_downspread.bits.IGNORE_MSA_TIMING_PARAM = 965 (stream->ignore_msa_timing_param) ? 1 : 0; 966 967 if (new_downspread.raw != old_downspread.raw) 968 dm_helpers_dp_write_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL, 969 &new_downspread.raw, 970 sizeof(new_downspread)); 971 } 972 973 void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz) 974 { 975 // TODO 976 } 977 978 void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable) 979 { 980 /* TODO: add periodic detection implementation */ 981 } 982