1 /* 2 * Copyright 2015 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: AMD 23 * 24 */ 25 26 #include <linux/string.h> 27 #include <linux/acpi.h> 28 #include <linux/i2c.h> 29 30 #include <drm/drm_atomic.h> 31 #include <drm/drm_probe_helper.h> 32 #include <drm/amdgpu_drm.h> 33 #include <drm/drm_edid.h> 34 35 #include "dm_services.h" 36 #include "amdgpu.h" 37 #include "dc.h" 38 #include "amdgpu_dm.h" 39 #include "amdgpu_dm_irq.h" 40 #include "amdgpu_dm_mst_types.h" 41 #include "dpcd_defs.h" 42 #include "dc/inc/core_types.h" 43 44 #include "dm_helpers.h" 45 #include "ddc_service_types.h" 46 47 static u32 edid_extract_panel_id(struct edid *edid) 48 { 49 return (u32)edid->mfg_id[0] << 24 | 50 (u32)edid->mfg_id[1] << 16 | 51 (u32)EDID_PRODUCT_ID(edid); 52 } 53 54 static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps) 55 { 56 uint32_t panel_id = edid_extract_panel_id(edid); 57 58 switch (panel_id) { 59 /* Workaround for some monitors which does not work well with FAMS */ 60 case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E): 61 case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053): 62 case drm_edid_encode_panel_id('S', 'A', 'M', 0x71AC): 63 DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id); 64 edid_caps->panel_patch.disable_fams = true; 65 break; 66 /* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */ 67 case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB): 68 case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B): 69 DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id); 70 edid_caps->panel_patch.remove_sink_ext_caps = true; 71 break; 72 default: 73 return; 74 } 75 } 76 77 /** 78 * dm_helpers_parse_edid_caps() - Parse edid caps 79 * 80 * @link: current detected link 81 * @edid: [in] pointer to edid 82 * @edid_caps: [in] pointer to edid caps 83 * 84 * Return: void 85 */ 86 enum dc_edid_status dm_helpers_parse_edid_caps( 87 struct dc_link *link, 88 const struct dc_edid *edid, 89 struct dc_edid_caps *edid_caps) 90 { 91 struct amdgpu_dm_connector *aconnector = link->priv; 92 struct drm_connector *connector = &aconnector->base; 93 struct edid *edid_buf = edid ? (struct edid *) edid->raw_edid : NULL; 94 struct cea_sad *sads; 95 int sad_count = -1; 96 int sadb_count = -1; 97 int i = 0; 98 uint8_t *sadb = NULL; 99 100 enum dc_edid_status result = EDID_OK; 101 102 if (!edid_caps || !edid) 103 return EDID_BAD_INPUT; 104 105 if (!drm_edid_is_valid(edid_buf)) 106 result = EDID_BAD_CHECKSUM; 107 108 edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] | 109 ((uint16_t) edid_buf->mfg_id[1])<<8; 110 edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] | 111 ((uint16_t) edid_buf->prod_code[1])<<8; 112 edid_caps->serial_number = edid_buf->serial; 113 edid_caps->manufacture_week = edid_buf->mfg_week; 114 edid_caps->manufacture_year = edid_buf->mfg_year; 115 116 drm_edid_get_monitor_name(edid_buf, 117 edid_caps->display_name, 118 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS); 119 120 edid_caps->edid_hdmi = connector->display_info.is_hdmi; 121 122 sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads); 123 if (sad_count <= 0) 124 return result; 125 126 edid_caps->audio_mode_count = min(sad_count, DC_MAX_AUDIO_DESC_COUNT); 127 for (i = 0; i < edid_caps->audio_mode_count; ++i) { 128 struct cea_sad *sad = &sads[i]; 129 130 edid_caps->audio_modes[i].format_code = sad->format; 131 edid_caps->audio_modes[i].channel_count = sad->channels + 1; 132 edid_caps->audio_modes[i].sample_rate = sad->freq; 133 edid_caps->audio_modes[i].sample_size = sad->byte2; 134 } 135 136 sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb); 137 138 if (sadb_count < 0) { 139 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count); 140 sadb_count = 0; 141 } 142 143 if (sadb_count) 144 edid_caps->speaker_flags = sadb[0]; 145 else 146 edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION; 147 148 apply_edid_quirks(edid_buf, edid_caps); 149 150 kfree(sads); 151 kfree(sadb); 152 153 return result; 154 } 155 156 static void 157 fill_dc_mst_payload_table_from_drm(struct dc_link *link, 158 bool enable, 159 struct drm_dp_mst_atomic_payload *target_payload, 160 struct dc_dp_mst_stream_allocation_table *table) 161 { 162 struct dc_dp_mst_stream_allocation_table new_table = { 0 }; 163 struct dc_dp_mst_stream_allocation *sa; 164 struct link_mst_stream_allocation_table copy_of_link_table = 165 link->mst_stream_alloc_table; 166 167 int i; 168 int current_hw_table_stream_cnt = copy_of_link_table.stream_count; 169 struct link_mst_stream_allocation *dc_alloc; 170 171 /* TODO: refactor to set link->mst_stream_alloc_table directly if possible.*/ 172 if (enable) { 173 dc_alloc = 174 ©_of_link_table.stream_allocations[current_hw_table_stream_cnt]; 175 dc_alloc->vcp_id = target_payload->vcpi; 176 dc_alloc->slot_count = target_payload->time_slots; 177 } else { 178 for (i = 0; i < copy_of_link_table.stream_count; i++) { 179 dc_alloc = 180 ©_of_link_table.stream_allocations[i]; 181 182 if (dc_alloc->vcp_id == target_payload->vcpi) { 183 dc_alloc->vcp_id = 0; 184 dc_alloc->slot_count = 0; 185 break; 186 } 187 } 188 ASSERT(i != copy_of_link_table.stream_count); 189 } 190 191 /* Fill payload info*/ 192 for (i = 0; i < MAX_CONTROLLER_NUM; i++) { 193 dc_alloc = 194 ©_of_link_table.stream_allocations[i]; 195 if (dc_alloc->vcp_id > 0 && dc_alloc->slot_count > 0) { 196 sa = &new_table.stream_allocations[new_table.stream_count]; 197 sa->slot_count = dc_alloc->slot_count; 198 sa->vcp_id = dc_alloc->vcp_id; 199 new_table.stream_count++; 200 } 201 } 202 203 /* Overwrite the old table */ 204 *table = new_table; 205 } 206 207 void dm_helpers_dp_update_branch_info( 208 struct dc_context *ctx, 209 const struct dc_link *link) 210 {} 211 212 static void dm_helpers_construct_old_payload( 213 struct drm_dp_mst_topology_mgr *mgr, 214 struct drm_dp_mst_topology_state *mst_state, 215 struct drm_dp_mst_atomic_payload *new_payload, 216 struct drm_dp_mst_atomic_payload *old_payload) 217 { 218 struct drm_dp_mst_atomic_payload *pos; 219 int pbn_per_slot = mst_state->pbn_div; 220 u8 next_payload_vc_start = mgr->next_start_slot; 221 u8 payload_vc_start = new_payload->vc_start_slot; 222 u8 allocated_time_slots; 223 224 *old_payload = *new_payload; 225 226 /* Set correct time_slots/PBN of old payload. 227 * other fields (delete & dsc_enabled) in 228 * struct drm_dp_mst_atomic_payload are don't care fields 229 * while calling drm_dp_remove_payload_part2() 230 */ 231 list_for_each_entry(pos, &mst_state->payloads, next) { 232 if (pos != new_payload && 233 pos->vc_start_slot > payload_vc_start && 234 pos->vc_start_slot < next_payload_vc_start) 235 next_payload_vc_start = pos->vc_start_slot; 236 } 237 238 allocated_time_slots = next_payload_vc_start - payload_vc_start; 239 240 old_payload->time_slots = allocated_time_slots; 241 old_payload->pbn = allocated_time_slots * pbn_per_slot; 242 } 243 244 /* 245 * Writes payload allocation table in immediate downstream device. 246 */ 247 bool dm_helpers_dp_mst_write_payload_allocation_table( 248 struct dc_context *ctx, 249 const struct dc_stream_state *stream, 250 struct dc_dp_mst_stream_allocation_table *proposed_table, 251 bool enable) 252 { 253 struct amdgpu_dm_connector *aconnector; 254 struct drm_dp_mst_topology_state *mst_state; 255 struct drm_dp_mst_atomic_payload *target_payload, *new_payload, old_payload; 256 struct drm_dp_mst_topology_mgr *mst_mgr; 257 258 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; 259 /* Accessing the connector state is required for vcpi_slots allocation 260 * and directly relies on behaviour in commit check 261 * that blocks before commit guaranteeing that the state 262 * is not gonna be swapped while still in use in commit tail 263 */ 264 265 if (!aconnector || !aconnector->mst_root) 266 return false; 267 268 mst_mgr = &aconnector->mst_root->mst_mgr; 269 mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state); 270 new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port); 271 272 if (enable) { 273 target_payload = new_payload; 274 275 /* It's OK for this to fail */ 276 drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload); 277 } else { 278 /* construct old payload by VCPI*/ 279 dm_helpers_construct_old_payload(mst_mgr, mst_state, 280 new_payload, &old_payload); 281 target_payload = &old_payload; 282 283 drm_dp_remove_payload_part1(mst_mgr, mst_state, new_payload); 284 } 285 286 /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or 287 * AUX message. The sequence is slot 1-63 allocated sequence for each 288 * stream. AMD ASIC stream slot allocation should follow the same 289 * sequence. copy DRM MST allocation to dc 290 */ 291 fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table); 292 293 return true; 294 } 295 296 /* 297 * poll pending down reply 298 */ 299 void dm_helpers_dp_mst_poll_pending_down_reply( 300 struct dc_context *ctx, 301 const struct dc_link *link) 302 {} 303 304 /* 305 * Clear payload allocation table before enable MST DP link. 306 */ 307 void dm_helpers_dp_mst_clear_payload_allocation_table( 308 struct dc_context *ctx, 309 const struct dc_link *link) 310 {} 311 312 /* 313 * Polls for ACT (allocation change trigger) handled and sends 314 * ALLOCATE_PAYLOAD message. 315 */ 316 enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger( 317 struct dc_context *ctx, 318 const struct dc_stream_state *stream) 319 { 320 struct amdgpu_dm_connector *aconnector; 321 struct drm_dp_mst_topology_mgr *mst_mgr; 322 int ret; 323 324 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; 325 326 if (!aconnector || !aconnector->mst_root) 327 return ACT_FAILED; 328 329 mst_mgr = &aconnector->mst_root->mst_mgr; 330 331 if (!mst_mgr->mst_state) 332 return ACT_FAILED; 333 334 ret = drm_dp_check_act_status(mst_mgr); 335 336 if (ret) 337 return ACT_FAILED; 338 339 return ACT_SUCCESS; 340 } 341 342 bool dm_helpers_dp_mst_send_payload_allocation( 343 struct dc_context *ctx, 344 const struct dc_stream_state *stream, 345 bool enable) 346 { 347 struct amdgpu_dm_connector *aconnector; 348 struct drm_dp_mst_topology_state *mst_state; 349 struct drm_dp_mst_topology_mgr *mst_mgr; 350 struct drm_dp_mst_atomic_payload *new_payload, old_payload; 351 enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD; 352 enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD; 353 int ret = 0; 354 355 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; 356 357 if (!aconnector || !aconnector->mst_root) 358 return false; 359 360 mst_mgr = &aconnector->mst_root->mst_mgr; 361 mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state); 362 363 new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port); 364 365 if (!enable) { 366 set_flag = MST_CLEAR_ALLOCATED_PAYLOAD; 367 clr_flag = MST_ALLOCATE_NEW_PAYLOAD; 368 } 369 370 if (enable) { 371 ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, new_payload); 372 } else { 373 dm_helpers_construct_old_payload(mst_mgr, mst_state, 374 new_payload, &old_payload); 375 drm_dp_remove_payload_part2(mst_mgr, mst_state, &old_payload, new_payload); 376 } 377 378 if (ret) { 379 amdgpu_dm_set_mst_status(&aconnector->mst_status, 380 set_flag, false); 381 } else { 382 amdgpu_dm_set_mst_status(&aconnector->mst_status, 383 set_flag, true); 384 amdgpu_dm_set_mst_status(&aconnector->mst_status, 385 clr_flag, false); 386 } 387 388 return true; 389 } 390 391 void dm_dtn_log_begin(struct dc_context *ctx, 392 struct dc_log_buffer_ctx *log_ctx) 393 { 394 static const char msg[] = "[dtn begin]\n"; 395 396 if (!log_ctx) { 397 pr_info("%s", msg); 398 return; 399 } 400 401 dm_dtn_log_append_v(ctx, log_ctx, "%s", msg); 402 } 403 404 __printf(3, 4) 405 void dm_dtn_log_append_v(struct dc_context *ctx, 406 struct dc_log_buffer_ctx *log_ctx, 407 const char *msg, ...) 408 { 409 va_list args; 410 size_t total; 411 int n; 412 413 if (!log_ctx) { 414 /* No context, redirect to dmesg. */ 415 struct va_format vaf; 416 417 vaf.fmt = msg; 418 vaf.va = &args; 419 420 va_start(args, msg); 421 pr_info("%pV", &vaf); 422 va_end(args); 423 424 return; 425 } 426 427 /* Measure the output. */ 428 va_start(args, msg); 429 n = vsnprintf(NULL, 0, msg, args); 430 va_end(args); 431 432 if (n <= 0) 433 return; 434 435 /* Reallocate the string buffer as needed. */ 436 total = log_ctx->pos + n + 1; 437 438 if (total > log_ctx->size) { 439 char *buf = kvcalloc(total, sizeof(char), GFP_KERNEL); 440 441 if (buf) { 442 memcpy(buf, log_ctx->buf, log_ctx->pos); 443 kfree(log_ctx->buf); 444 445 log_ctx->buf = buf; 446 log_ctx->size = total; 447 } 448 } 449 450 if (!log_ctx->buf) 451 return; 452 453 /* Write the formatted string to the log buffer. */ 454 va_start(args, msg); 455 n = vscnprintf( 456 log_ctx->buf + log_ctx->pos, 457 log_ctx->size - log_ctx->pos, 458 msg, 459 args); 460 va_end(args); 461 462 if (n > 0) 463 log_ctx->pos += n; 464 } 465 466 void dm_dtn_log_end(struct dc_context *ctx, 467 struct dc_log_buffer_ctx *log_ctx) 468 { 469 static const char msg[] = "[dtn end]\n"; 470 471 if (!log_ctx) { 472 pr_info("%s", msg); 473 return; 474 } 475 476 dm_dtn_log_append_v(ctx, log_ctx, "%s", msg); 477 } 478 479 bool dm_helpers_dp_mst_start_top_mgr( 480 struct dc_context *ctx, 481 const struct dc_link *link, 482 bool boot) 483 { 484 struct amdgpu_dm_connector *aconnector = link->priv; 485 int ret; 486 487 if (!aconnector) { 488 DRM_ERROR("Failed to find connector for link!"); 489 return false; 490 } 491 492 if (boot) { 493 DRM_INFO("DM_MST: Differing MST start on aconnector: %p [id: %d]\n", 494 aconnector, aconnector->base.base.id); 495 return true; 496 } 497 498 DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n", 499 aconnector, aconnector->base.base.id); 500 501 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); 502 if (ret < 0) { 503 DRM_ERROR("DM_MST: Failed to set the device into MST mode!"); 504 return false; 505 } 506 507 DRM_INFO("DM_MST: DP%x, %d-lane link detected\n", aconnector->mst_mgr.dpcd[0], 508 aconnector->mst_mgr.dpcd[2] & DP_MAX_LANE_COUNT_MASK); 509 510 return true; 511 } 512 513 bool dm_helpers_dp_mst_stop_top_mgr( 514 struct dc_context *ctx, 515 struct dc_link *link) 516 { 517 struct amdgpu_dm_connector *aconnector = link->priv; 518 519 if (!aconnector) { 520 DRM_ERROR("Failed to find connector for link!"); 521 return false; 522 } 523 524 DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n", 525 aconnector, aconnector->base.base.id); 526 527 if (aconnector->mst_mgr.mst_state == true) { 528 drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false); 529 link->cur_link_settings.lane_count = 0; 530 } 531 532 return false; 533 } 534 535 bool dm_helpers_dp_read_dpcd( 536 struct dc_context *ctx, 537 const struct dc_link *link, 538 uint32_t address, 539 uint8_t *data, 540 uint32_t size) 541 { 542 543 struct amdgpu_dm_connector *aconnector = link->priv; 544 545 if (!aconnector) 546 return false; 547 548 return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address, data, 549 size) == size; 550 } 551 552 bool dm_helpers_dp_write_dpcd( 553 struct dc_context *ctx, 554 const struct dc_link *link, 555 uint32_t address, 556 const uint8_t *data, 557 uint32_t size) 558 { 559 struct amdgpu_dm_connector *aconnector = link->priv; 560 561 if (!aconnector) { 562 DRM_ERROR("Failed to find connector for link!"); 563 return false; 564 } 565 566 return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux, 567 address, (uint8_t *)data, size) > 0; 568 } 569 570 bool dm_helpers_submit_i2c( 571 struct dc_context *ctx, 572 const struct dc_link *link, 573 struct i2c_command *cmd) 574 { 575 struct amdgpu_dm_connector *aconnector = link->priv; 576 struct i2c_msg *msgs; 577 int i = 0; 578 int num = cmd->number_of_payloads; 579 bool result; 580 581 if (!aconnector) { 582 DRM_ERROR("Failed to find connector for link!"); 583 return false; 584 } 585 586 msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL); 587 588 if (!msgs) 589 return false; 590 591 for (i = 0; i < num; i++) { 592 msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD; 593 msgs[i].addr = cmd->payloads[i].address; 594 msgs[i].len = cmd->payloads[i].length; 595 msgs[i].buf = cmd->payloads[i].data; 596 } 597 598 result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num; 599 600 kfree(msgs); 601 602 return result; 603 } 604 605 static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, 606 bool is_write_cmd, 607 unsigned char cmd, 608 unsigned int length, 609 unsigned int offset, 610 unsigned char *data) 611 { 612 bool success = false; 613 unsigned char rc_data[16] = {0}; 614 unsigned char rc_offset[4] = {0}; 615 unsigned char rc_length[2] = {0}; 616 unsigned char rc_cmd = 0; 617 unsigned char rc_result = 0xFF; 618 unsigned char i = 0; 619 int ret; 620 621 if (is_write_cmd) { 622 // write rc data 623 memmove(rc_data, data, length); 624 ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data)); 625 } 626 627 // write rc offset 628 rc_offset[0] = (unsigned char) offset & 0xFF; 629 rc_offset[1] = (unsigned char) (offset >> 8) & 0xFF; 630 rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF; 631 rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF; 632 ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset)); 633 634 // write rc length 635 rc_length[0] = (unsigned char) length & 0xFF; 636 rc_length[1] = (unsigned char) (length >> 8) & 0xFF; 637 ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length)); 638 639 // write rc cmd 640 rc_cmd = cmd | 0x80; 641 ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd)); 642 643 if (ret < 0) { 644 DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret); 645 return false; 646 } 647 648 // poll until active is 0 649 for (i = 0; i < 10; i++) { 650 drm_dp_dpcd_read(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd)); 651 if (rc_cmd == cmd) 652 // active is 0 653 break; 654 msleep(10); 655 } 656 657 // read rc result 658 drm_dp_dpcd_read(aux, SYNAPTICS_RC_RESULT, &rc_result, sizeof(rc_result)); 659 success = (rc_result == 0); 660 661 if (success && !is_write_cmd) { 662 // read rc data 663 drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length); 664 } 665 666 drm_dbg_dp(aux->drm_dev, "success = %d\n", success); 667 668 return success; 669 } 670 671 static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) 672 { 673 unsigned char data[16] = {0}; 674 675 drm_dbg_dp(aux->drm_dev, "Start\n"); 676 677 // Step 2 678 data[0] = 'P'; 679 data[1] = 'R'; 680 data[2] = 'I'; 681 data[3] = 'U'; 682 data[4] = 'S'; 683 684 if (!execute_synaptics_rc_command(aux, true, 0x01, 5, 0, data)) 685 return; 686 687 // Step 3 and 4 688 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data)) 689 return; 690 691 data[0] &= (~(1 << 1)); // set bit 1 to 0 692 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data)) 693 return; 694 695 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data)) 696 return; 697 698 data[0] &= (~(1 << 1)); // set bit 1 to 0 699 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220D98, data)) 700 return; 701 702 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data)) 703 return; 704 705 data[0] &= (~(1 << 1)); // set bit 1 to 0 706 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data)) 707 return; 708 709 // Step 3 and 5 710 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data)) 711 return; 712 713 data[0] |= (1 << 1); // set bit 1 to 1 714 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data)) 715 return; 716 717 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data)) 718 return; 719 720 data[0] |= (1 << 1); // set bit 1 to 1 721 722 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data)) 723 return; 724 725 data[0] |= (1 << 1); // set bit 1 to 1 726 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data)) 727 return; 728 729 // Step 6 730 if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL)) 731 return; 732 733 drm_dbg_dp(aux->drm_dev, "Done\n"); 734 } 735 736 /* MST Dock */ 737 static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA"; 738 739 static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst( 740 struct drm_dp_aux *aux, 741 const struct dc_stream_state *stream, 742 bool enable) 743 { 744 uint8_t ret = 0; 745 746 drm_dbg_dp(aux->drm_dev, 747 "Configure DSC to non-virtual dpcd synaptics\n"); 748 749 if (enable) { 750 /* When DSC is enabled on previous boot and reboot with the hub, 751 * there is a chance that Synaptics hub gets stuck during reboot sequence. 752 * Applying a workaround to reset Synaptics SDP fifo before enabling the first stream 753 */ 754 if (!stream->link->link_status.link_active && 755 memcmp(stream->link->dpcd_caps.branch_dev_name, 756 (int8_t *)SYNAPTICS_DEVICE_ID, 4) == 0) 757 apply_synaptics_fifo_reset_wa(aux); 758 759 ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1); 760 DRM_INFO("Send DSC enable to synaptics\n"); 761 762 } else { 763 /* Synaptics hub not support virtual dpcd, 764 * external monitor occur garbage while disable DSC, 765 * Disable DSC only when entire link status turn to false, 766 */ 767 if (!stream->link->link_status.link_active) { 768 ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1); 769 DRM_INFO("Send DSC disable to synaptics\n"); 770 } 771 } 772 773 return ret; 774 } 775 776 bool dm_helpers_dp_write_dsc_enable( 777 struct dc_context *ctx, 778 const struct dc_stream_state *stream, 779 bool enable) 780 { 781 static const uint8_t DSC_DISABLE; 782 static const uint8_t DSC_DECODING = 0x01; 783 static const uint8_t DSC_PASSTHROUGH = 0x02; 784 785 struct amdgpu_dm_connector *aconnector = 786 (struct amdgpu_dm_connector *)stream->dm_stream_context; 787 struct drm_device *dev = aconnector->base.dev; 788 struct drm_dp_mst_port *port; 789 uint8_t enable_dsc = enable ? DSC_DECODING : DSC_DISABLE; 790 uint8_t enable_passthrough = enable ? DSC_PASSTHROUGH : DSC_DISABLE; 791 uint8_t ret = 0; 792 793 if (!stream) 794 return false; 795 796 if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { 797 if (!aconnector->dsc_aux) 798 return false; 799 800 // apply w/a to synaptics 801 if (needs_dsc_aux_workaround(aconnector->dc_link) && 802 (aconnector->mst_downstream_port_present.byte & 0x7) != 0x3) 803 return write_dsc_enable_synaptics_non_virtual_dpcd_mst( 804 aconnector->dsc_aux, stream, enable_dsc); 805 806 port = aconnector->mst_output_port; 807 808 if (enable) { 809 if (port->passthrough_aux) { 810 ret = drm_dp_dpcd_write(port->passthrough_aux, 811 DP_DSC_ENABLE, 812 &enable_passthrough, 1); 813 drm_dbg_dp(dev, 814 "Sent DSC pass-through enable to virtual dpcd port, ret = %u\n", 815 ret); 816 } 817 818 ret = drm_dp_dpcd_write(aconnector->dsc_aux, 819 DP_DSC_ENABLE, &enable_dsc, 1); 820 drm_dbg_dp(dev, 821 "Sent DSC decoding enable to %s port, ret = %u\n", 822 (port->passthrough_aux) ? "remote RX" : 823 "virtual dpcd", 824 ret); 825 } else { 826 ret = drm_dp_dpcd_write(aconnector->dsc_aux, 827 DP_DSC_ENABLE, &enable_dsc, 1); 828 drm_dbg_dp(dev, 829 "Sent DSC decoding disable to %s port, ret = %u\n", 830 (port->passthrough_aux) ? "remote RX" : 831 "virtual dpcd", 832 ret); 833 834 if (port->passthrough_aux) { 835 ret = drm_dp_dpcd_write(port->passthrough_aux, 836 DP_DSC_ENABLE, 837 &enable_passthrough, 1); 838 drm_dbg_dp(dev, 839 "Sent DSC pass-through disable to virtual dpcd port, ret = %u\n", 840 ret); 841 } 842 } 843 } 844 845 if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) { 846 if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { 847 ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); 848 drm_dbg_dp(dev, 849 "Send DSC %s to SST RX\n", 850 enable_dsc ? "enable" : "disable"); 851 } else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { 852 ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); 853 drm_dbg_dp(dev, 854 "Send DSC %s to DP-HDMI PCON\n", 855 enable_dsc ? "enable" : "disable"); 856 } 857 } 858 859 return ret; 860 } 861 862 bool dm_helpers_is_dp_sink_present(struct dc_link *link) 863 { 864 bool dp_sink_present; 865 struct amdgpu_dm_connector *aconnector = link->priv; 866 867 if (!aconnector) { 868 BUG_ON("Failed to find connector for link!"); 869 return true; 870 } 871 872 mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex); 873 dp_sink_present = dc_link_is_dp_sink_present(link); 874 mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex); 875 return dp_sink_present; 876 } 877 878 enum dc_edid_status dm_helpers_read_local_edid( 879 struct dc_context *ctx, 880 struct dc_link *link, 881 struct dc_sink *sink) 882 { 883 struct amdgpu_dm_connector *aconnector = link->priv; 884 struct drm_connector *connector = &aconnector->base; 885 struct i2c_adapter *ddc; 886 int retry = 3; 887 enum dc_edid_status edid_status; 888 struct edid *edid; 889 890 if (link->aux_mode) 891 ddc = &aconnector->dm_dp_aux.aux.ddc; 892 else 893 ddc = &aconnector->i2c->base; 894 895 /* some dongles read edid incorrectly the first time, 896 * do check sum and retry to make sure read correct edid. 897 */ 898 do { 899 900 edid = drm_get_edid(&aconnector->base, ddc); 901 902 /* DP Compliance Test 4.2.2.6 */ 903 if (link->aux_mode && connector->edid_corrupt) 904 drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum); 905 906 if (!edid && connector->edid_corrupt) { 907 connector->edid_corrupt = false; 908 return EDID_BAD_CHECKSUM; 909 } 910 911 if (!edid) 912 return EDID_NO_RESPONSE; 913 914 sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1); 915 memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length); 916 917 /* We don't need the original edid anymore */ 918 kfree(edid); 919 920 edid_status = dm_helpers_parse_edid_caps( 921 link, 922 &sink->dc_edid, 923 &sink->edid_caps); 924 925 } while (edid_status == EDID_BAD_CHECKSUM && --retry > 0); 926 927 if (edid_status != EDID_OK) 928 DRM_ERROR("EDID err: %d, on connector: %s", 929 edid_status, 930 aconnector->base.name); 931 if (link->aux_mode) { 932 union test_request test_request = {0}; 933 union test_response test_response = {0}; 934 935 dm_helpers_dp_read_dpcd(ctx, 936 link, 937 DP_TEST_REQUEST, 938 &test_request.raw, 939 sizeof(union test_request)); 940 941 if (!test_request.bits.EDID_READ) 942 return edid_status; 943 944 test_response.bits.EDID_CHECKSUM_WRITE = 1; 945 946 dm_helpers_dp_write_dpcd(ctx, 947 link, 948 DP_TEST_EDID_CHECKSUM, 949 &sink->dc_edid.raw_edid[sink->dc_edid.length-1], 950 1); 951 952 dm_helpers_dp_write_dpcd(ctx, 953 link, 954 DP_TEST_RESPONSE, 955 &test_response.raw, 956 sizeof(test_response)); 957 958 } 959 960 return edid_status; 961 } 962 int dm_helper_dmub_aux_transfer_sync( 963 struct dc_context *ctx, 964 const struct dc_link *link, 965 struct aux_payload *payload, 966 enum aux_return_code_type *operation_result) 967 { 968 return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link->link_index, payload, 969 operation_result); 970 } 971 972 int dm_helpers_dmub_set_config_sync(struct dc_context *ctx, 973 const struct dc_link *link, 974 struct set_config_cmd_payload *payload, 975 enum set_config_status *operation_result) 976 { 977 return amdgpu_dm_process_dmub_set_config_sync(ctx, link->link_index, payload, 978 operation_result); 979 } 980 981 void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks) 982 { 983 /* TODO: something */ 984 } 985 986 void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us) 987 { 988 // TODO: 989 //amdgpu_device_gpu_recover(dc_context->driver-context, NULL); 990 } 991 992 void dm_helpers_init_panel_settings( 993 struct dc_context *ctx, 994 struct dc_panel_config *panel_config, 995 struct dc_sink *sink) 996 { 997 // Extra Panel Power Sequence 998 panel_config->pps.extra_t3_ms = sink->edid_caps.panel_patch.extra_t3_ms; 999 panel_config->pps.extra_t7_ms = sink->edid_caps.panel_patch.extra_t7_ms; 1000 panel_config->pps.extra_delay_backlight_off = sink->edid_caps.panel_patch.extra_delay_backlight_off; 1001 panel_config->pps.extra_post_t7_ms = 0; 1002 panel_config->pps.extra_pre_t11_ms = 0; 1003 panel_config->pps.extra_t12_ms = sink->edid_caps.panel_patch.extra_t12_ms; 1004 panel_config->pps.extra_post_OUI_ms = 0; 1005 // Feature DSC 1006 panel_config->dsc.disable_dsc_edp = false; 1007 panel_config->dsc.force_dsc_edp_policy = 0; 1008 } 1009 1010 void dm_helpers_override_panel_settings( 1011 struct dc_context *ctx, 1012 struct dc_panel_config *panel_config) 1013 { 1014 // Feature DSC 1015 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) 1016 panel_config->dsc.disable_dsc_edp = true; 1017 } 1018 1019 void *dm_helpers_allocate_gpu_mem( 1020 struct dc_context *ctx, 1021 enum dc_gpu_mem_alloc_type type, 1022 size_t size, 1023 long long *addr) 1024 { 1025 struct amdgpu_device *adev = ctx->driver_context; 1026 struct dal_allocation *da; 1027 u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ? 1028 AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM; 1029 int ret; 1030 1031 da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL); 1032 if (!da) 1033 return NULL; 1034 1035 ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, 1036 domain, &da->bo, 1037 &da->gpu_addr, &da->cpu_ptr); 1038 1039 *addr = da->gpu_addr; 1040 1041 if (ret) { 1042 kfree(da); 1043 return NULL; 1044 } 1045 1046 /* add da to list in dm */ 1047 list_add(&da->list, &adev->dm.da_list); 1048 1049 return da->cpu_ptr; 1050 } 1051 1052 void dm_helpers_free_gpu_mem( 1053 struct dc_context *ctx, 1054 enum dc_gpu_mem_alloc_type type, 1055 void *pvMem) 1056 { 1057 struct amdgpu_device *adev = ctx->driver_context; 1058 struct dal_allocation *da; 1059 1060 /* walk the da list in DM */ 1061 list_for_each_entry(da, &adev->dm.da_list, list) { 1062 if (pvMem == da->cpu_ptr) { 1063 amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); 1064 list_del(&da->list); 1065 kfree(da); 1066 break; 1067 } 1068 } 1069 } 1070 1071 bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable) 1072 { 1073 enum dc_irq_source irq_source; 1074 bool ret; 1075 1076 irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX; 1077 1078 ret = dc_interrupt_set(ctx->dc, irq_source, enable); 1079 1080 DRM_DEBUG_DRIVER("Dmub trace irq %sabling: r=%d\n", 1081 enable ? "en" : "dis", ret); 1082 return ret; 1083 } 1084 1085 void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream) 1086 { 1087 /* TODO: virtual DPCD */ 1088 struct dc_link *link = stream->link; 1089 union down_spread_ctrl old_downspread; 1090 union down_spread_ctrl new_downspread; 1091 1092 if (link->aux_access_disabled) 1093 return; 1094 1095 if (!dm_helpers_dp_read_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL, 1096 &old_downspread.raw, 1097 sizeof(old_downspread))) 1098 return; 1099 1100 new_downspread.raw = old_downspread.raw; 1101 new_downspread.bits.IGNORE_MSA_TIMING_PARAM = 1102 (stream->ignore_msa_timing_param) ? 1 : 0; 1103 1104 if (new_downspread.raw != old_downspread.raw) 1105 dm_helpers_dp_write_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL, 1106 &new_downspread.raw, 1107 sizeof(new_downspread)); 1108 } 1109 1110 bool dm_helpers_dp_handle_test_pattern_request( 1111 struct dc_context *ctx, 1112 const struct dc_link *link, 1113 union link_test_pattern dpcd_test_pattern, 1114 union test_misc dpcd_test_params) 1115 { 1116 enum dp_test_pattern test_pattern; 1117 enum dp_test_pattern_color_space test_pattern_color_space = 1118 DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED; 1119 enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED; 1120 enum dc_pixel_encoding requestPixelEncoding = PIXEL_ENCODING_UNDEFINED; 1121 struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; 1122 struct pipe_ctx *pipe_ctx = NULL; 1123 struct amdgpu_dm_connector *aconnector = link->priv; 1124 struct drm_device *dev = aconnector->base.dev; 1125 int i; 1126 1127 for (i = 0; i < MAX_PIPES; i++) { 1128 if (pipes[i].stream == NULL) 1129 continue; 1130 1131 if (pipes[i].stream->link == link && !pipes[i].top_pipe && 1132 !pipes[i].prev_odm_pipe) { 1133 pipe_ctx = &pipes[i]; 1134 break; 1135 } 1136 } 1137 1138 if (pipe_ctx == NULL) 1139 return false; 1140 1141 switch (dpcd_test_pattern.bits.PATTERN) { 1142 case LINK_TEST_PATTERN_COLOR_RAMP: 1143 test_pattern = DP_TEST_PATTERN_COLOR_RAMP; 1144 break; 1145 case LINK_TEST_PATTERN_VERTICAL_BARS: 1146 test_pattern = DP_TEST_PATTERN_VERTICAL_BARS; 1147 break; /* black and white */ 1148 case LINK_TEST_PATTERN_COLOR_SQUARES: 1149 test_pattern = (dpcd_test_params.bits.DYN_RANGE == 1150 TEST_DYN_RANGE_VESA ? 1151 DP_TEST_PATTERN_COLOR_SQUARES : 1152 DP_TEST_PATTERN_COLOR_SQUARES_CEA); 1153 break; 1154 default: 1155 test_pattern = DP_TEST_PATTERN_VIDEO_MODE; 1156 break; 1157 } 1158 1159 if (dpcd_test_params.bits.CLR_FORMAT == 0) 1160 test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB; 1161 else 1162 test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ? 1163 DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 : 1164 DP_TEST_PATTERN_COLOR_SPACE_YCBCR601; 1165 1166 switch (dpcd_test_params.bits.BPC) { 1167 case 0: // 6 bits 1168 requestColorDepth = COLOR_DEPTH_666; 1169 break; 1170 case 1: // 8 bits 1171 requestColorDepth = COLOR_DEPTH_888; 1172 break; 1173 case 2: // 10 bits 1174 requestColorDepth = COLOR_DEPTH_101010; 1175 break; 1176 case 3: // 12 bits 1177 requestColorDepth = COLOR_DEPTH_121212; 1178 break; 1179 default: 1180 break; 1181 } 1182 1183 switch (dpcd_test_params.bits.CLR_FORMAT) { 1184 case 0: 1185 requestPixelEncoding = PIXEL_ENCODING_RGB; 1186 break; 1187 case 1: 1188 requestPixelEncoding = PIXEL_ENCODING_YCBCR422; 1189 break; 1190 case 2: 1191 requestPixelEncoding = PIXEL_ENCODING_YCBCR444; 1192 break; 1193 default: 1194 requestPixelEncoding = PIXEL_ENCODING_RGB; 1195 break; 1196 } 1197 1198 if ((requestColorDepth != COLOR_DEPTH_UNDEFINED 1199 && pipe_ctx->stream->timing.display_color_depth != requestColorDepth) 1200 || (requestPixelEncoding != PIXEL_ENCODING_UNDEFINED 1201 && pipe_ctx->stream->timing.pixel_encoding != requestPixelEncoding)) { 1202 drm_dbg(dev, 1203 "original bpc %d pix encoding %d, changing to %d %d\n", 1204 pipe_ctx->stream->timing.display_color_depth, 1205 pipe_ctx->stream->timing.pixel_encoding, 1206 requestColorDepth, 1207 requestPixelEncoding); 1208 pipe_ctx->stream->timing.display_color_depth = requestColorDepth; 1209 pipe_ctx->stream->timing.pixel_encoding = requestPixelEncoding; 1210 1211 dc_link_update_dsc_config(pipe_ctx); 1212 1213 aconnector->timing_changed = true; 1214 /* store current timing */ 1215 if (aconnector->timing_requested) 1216 *aconnector->timing_requested = pipe_ctx->stream->timing; 1217 else 1218 drm_err(dev, "timing storage failed\n"); 1219 1220 } 1221 1222 pipe_ctx->stream->test_pattern.type = test_pattern; 1223 pipe_ctx->stream->test_pattern.color_space = test_pattern_color_space; 1224 1225 dc_link_dp_set_test_pattern( 1226 (struct dc_link *) link, 1227 test_pattern, 1228 test_pattern_color_space, 1229 NULL, 1230 NULL, 1231 0); 1232 1233 return false; 1234 } 1235 1236 void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz) 1237 { 1238 // TODO 1239 } 1240 1241 void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable) 1242 { 1243 /* TODO: add periodic detection implementation */ 1244 } 1245 1246 void dm_helpers_dp_mst_update_branch_bandwidth( 1247 struct dc_context *ctx, 1248 struct dc_link *link) 1249 { 1250 // TODO 1251 } 1252 1253 static bool dm_is_freesync_pcon_whitelist(const uint32_t branch_dev_id) 1254 { 1255 bool ret_val = false; 1256 1257 switch (branch_dev_id) { 1258 case DP_BRANCH_DEVICE_ID_0060AD: 1259 case DP_BRANCH_DEVICE_ID_00E04C: 1260 case DP_BRANCH_DEVICE_ID_90CC24: 1261 ret_val = true; 1262 break; 1263 default: 1264 break; 1265 } 1266 1267 return ret_val; 1268 } 1269 1270 enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link) 1271 { 1272 struct dpcd_caps *dpcd_caps = &link->dpcd_caps; 1273 enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE; 1274 1275 switch (dpcd_caps->dongle_type) { 1276 case DISPLAY_DONGLE_DP_HDMI_CONVERTER: 1277 if (dpcd_caps->adaptive_sync_caps.dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT == true && 1278 dpcd_caps->allow_invalid_MSA_timing_param == true && 1279 dm_is_freesync_pcon_whitelist(dpcd_caps->branch_dev_id)) 1280 as_type = FREESYNC_TYPE_PCON_IN_WHITELIST; 1281 break; 1282 default: 1283 break; 1284 } 1285 1286 return as_type; 1287 } 1288