/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/string.h>
#include <linux/acpi.h>
#include <linux/version.h>
#include <linux/i2c.h>

#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_edid.h>

#include "dm_services.h"
#include "amdgpu.h"
#include "dc.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_dm_mst_types.h"

#include "dm_helpers.h"

/* dm_helpers_parse_edid_caps
 *
 * Parse edid caps
 *
 * @edid:	[in] pointer to edid
 * @edid_caps:	[out] pointer to edid caps to be filled in
 * @return
 *	result of the parsing (EDID_OK on success)
 */
enum dc_edid_status dm_helpers_parse_edid_caps(
		struct dc_context *ctx,
		const struct dc_edid *edid,
		struct dc_edid_caps *edid_caps)
{
	struct edid *edid_buf = (struct edid *) edid->raw_edid;
	struct cea_sad *sads;
	int sad_count = -1;
	int sadb_count = -1;
	int i = 0;
	int j = 0;
	uint8_t *sadb = NULL;

	enum dc_edid_status result = EDID_OK;

	if (!edid_caps || !edid)
		return EDID_BAD_INPUT;

	if (!drm_edid_is_valid(edid_buf))
		result = EDID_BAD_CHECKSUM;

	edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
		((uint16_t) edid_buf->mfg_id[1])<<8;
	edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
		((uint16_t) edid_buf->prod_code[1])<<8;
	edid_caps->serial_number = edid_buf->serial;
	edid_caps->manufacture_week = edid_buf->mfg_week;
	edid_caps->manufacture_year = edid_buf->mfg_year;

	/* One of the four detailed_timings stores the monitor name (display
	 * descriptor tag 0xfc). It's stored in a 13-byte array and may be
	 * terminated early by a '\n'. */
	for (i = 0; i < 4; i++) {
		if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) {
			while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) {
				if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n')
					break;

				edid_caps->display_name[j] =
					edid_buf->detailed_timings[i].data.other_data.data.str.str[j];
				j++;
			}
		}
	}

	edid_caps->edid_hdmi = drm_detect_hdmi_monitor(
			(struct edid *) edid->raw_edid);

	sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
	if (sad_count <= 0)
		return result;

	edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ?
					sad_count : DC_MAX_AUDIO_DESC_COUNT;
	for (i = 0; i < edid_caps->audio_mode_count; ++i) {
		struct cea_sad *sad = &sads[i];

		edid_caps->audio_modes[i].format_code = sad->format;
		edid_caps->audio_modes[i].channel_count = sad->channels + 1;
		edid_caps->audio_modes[i].sample_rate = sad->freq;
		edid_caps->audio_modes[i].sample_size = sad->byte2;
	}

	sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);

	if (sadb_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
		sadb_count = 0;
	}

	if (sadb_count)
		edid_caps->speaker_flags = sadb[0];
	else
		edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;

	kfree(sads);
	kfree(sadb);

	return result;
}
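
/*
 * Illustrative sketch only (not part of the driver): roughly how a caller
 * might consume the capabilities filled in above. The dc_sink usage mirrors
 * dm_helpers_read_local_edid() further down in this file; the helper
 * report_sink_caps() is hypothetical.
 *
 *	enum dc_edid_status status;
 *
 *	status = dm_helpers_parse_edid_caps(ctx, &sink->dc_edid,
 *					    &sink->edid_caps);
 *	if (status == EDID_OK)
 *		report_sink_caps(sink->edid_caps.display_name,
 *				 sink->edid_caps.audio_mode_count);
 */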

static void get_payload_table(
		struct amdgpu_dm_connector *aconnector,
		struct dp_mst_stream_allocation_table *proposed_table)
{
	int i;
	struct drm_dp_mst_topology_mgr *mst_mgr =
			&aconnector->mst_port->mst_mgr;

	mutex_lock(&mst_mgr->payload_lock);

	proposed_table->stream_count = 0;

	/* number of active streams */
	for (i = 0; i < mst_mgr->max_payloads; i++) {
		if (mst_mgr->payloads[i].num_slots == 0)
			break; /* end of vcp_id table */

		ASSERT(mst_mgr->payloads[i].payload_state !=
				DP_PAYLOAD_DELETE_LOCAL);

		if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL ||
			mst_mgr->payloads[i].payload_state ==
					DP_PAYLOAD_REMOTE) {

			struct dp_mst_stream_allocation *sa =
					&proposed_table->stream_allocations[
						proposed_table->stream_count];

			sa->slot_count = mst_mgr->payloads[i].num_slots;
			sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi;
			proposed_table->stream_count++;
		}
	}

	mutex_unlock(&mst_mgr->payload_lock);
}

void dm_helpers_dp_update_branch_info(
	struct dc_context *ctx,
	const struct dc_link *link)
{}

/*
 * Writes payload allocation table in immediate downstream device.
 */
bool dm_helpers_dp_mst_write_payload_allocation_table(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		struct dp_mst_stream_allocation_table *proposed_table,
		bool enable)
{
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	bool ret;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
	/* Accessing the connector state is required for vcpi_slots allocation
	 * and directly relies on behaviour in commit check
	 * that blocks before commit, guaranteeing that the state
	 * is not going to be swapped while still in use in commit tail */

	if (!aconnector || !aconnector->mst_port)
		return false;

	dm_conn_state = to_dm_connector_state(aconnector->base.state);

	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!mst_mgr->mst_state)
		return false;

	mst_port = aconnector->port;

	if (enable) {
		ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port,
					       dm_conn_state->pbn,
					       dm_conn_state->vcpi_slots);
		if (!ret)
			return false;
	} else {
		drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
	}

	ret = drm_dp_update_payload_part1(mst_mgr);

	/* mst_mgr->payloads are the VC payloads used to notify the MST branch
	 * device via DPCD or AUX messages. Slots 1-63 are allocated
	 * sequentially for each stream, and the AMD ASIC stream slot
	 * allocation must follow the same sequence, so copy the DRM MST
	 * allocation to dc. */

	get_payload_table(aconnector, proposed_table);

	if (ret)
		return false;

	return true;
}
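
/*
 * Illustrative sketch only: the enable-side calling sequence implied by the
 * DRM helpers used above (drm_dp_update_payload_part1()/part2()). In practice
 * dc drives this from its link code; the caller shown here is hypothetical.
 *
 *	struct dp_mst_stream_allocation_table table = { 0 };
 *
 *	// 1) Allocate VCPI slots and write the payload table (part 1).
 *	if (!dm_helpers_dp_mst_write_payload_allocation_table(ctx, stream,
 *							      &table, true))
 *		return false;
 *
 *	// 2) Wait for the branch device to handle the ACT.
 *	if (dm_helpers_dp_mst_poll_for_allocation_change_trigger(ctx, stream)
 *			!= ACT_SUCCESS)
 *		return false;
 *
 *	// 3) Send ALLOCATE_PAYLOAD and finish the payload update (part 2).
 *	return dm_helpers_dp_mst_send_payload_allocation(ctx, stream, true);
 */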

/*
 * Poll pending down reply.
 */
void dm_helpers_dp_mst_poll_pending_down_reply(
	struct dc_context *ctx,
	const struct dc_link *link)
{}

/*
 * Clear payload allocation table before enabling the MST DP link.
 */
void dm_helpers_dp_mst_clear_payload_allocation_table(
	struct dc_context *ctx,
	const struct dc_link *link)
{}

/*
 * Polls for ACT (allocation change trigger) handled and sends
 * ALLOCATE_PAYLOAD message.
 */
enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
		struct dc_context *ctx,
		const struct dc_stream_state *stream)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	int ret;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector || !aconnector->mst_port)
		return ACT_FAILED;

	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!mst_mgr->mst_state)
		return ACT_FAILED;

	ret = drm_dp_check_act_status(mst_mgr);

	if (ret)
		return ACT_FAILED;

	return ACT_SUCCESS;
}

bool dm_helpers_dp_mst_send_payload_allocation(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		bool enable)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	int ret;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector || !aconnector->mst_port)
		return false;

	mst_port = aconnector->port;

	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!mst_mgr->mst_state)
		return false;

	ret = drm_dp_update_payload_part2(mst_mgr);

	if (ret)
		return false;

	if (!enable)
		drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);

	return true;
}

void dm_dtn_log_begin(struct dc_context *ctx,
	struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn begin]\n";

	if (!log_ctx) {
		pr_info("%s", msg);
		return;
	}

	dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
}

void dm_dtn_log_append_v(struct dc_context *ctx,
	struct dc_log_buffer_ctx *log_ctx,
	const char *msg, ...)
{
	va_list args;
	size_t total;
	int n;

	if (!log_ctx) {
		/* No context, redirect to dmesg. */
		struct va_format vaf;

		vaf.fmt = msg;
		vaf.va = &args;

		va_start(args, msg);
		pr_info("%pV", &vaf);
		va_end(args);

		return;
	}

	/* Measure the output. */
	va_start(args, msg);
	n = vsnprintf(NULL, 0, msg, args);
	va_end(args);

	if (n <= 0)
		return;

	/* Reallocate the string buffer as needed. */
	total = log_ctx->pos + n + 1;

	if (total > log_ctx->size) {
		char *buf = kvcalloc(total, sizeof(char), GFP_KERNEL);

		if (buf) {
			memcpy(buf, log_ctx->buf, log_ctx->pos);
			/* log_ctx->buf may itself have come from kvcalloc()
			 * on a previous call, so kvfree() it. */
			kvfree(log_ctx->buf);

			log_ctx->buf = buf;
			log_ctx->size = total;
		}
	}

	if (!log_ctx->buf)
		return;

	/* Write the formatted string to the log buffer. */
	va_start(args, msg);
	n = vscnprintf(
		log_ctx->buf + log_ctx->pos,
		log_ctx->size - log_ctx->pos,
		msg,
		args);
	va_end(args);

	if (n > 0)
		log_ctx->pos += n;
}

void dm_dtn_log_end(struct dc_context *ctx,
	struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn end]\n";

	if (!log_ctx) {
		pr_info("%s", msg);
		return;
	}

	dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
}
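
/*
 * Illustrative sketch only: typical use of the DTN log helpers above. With a
 * log buffer context the output accumulates in log_ctx->buf, which
 * dm_dtn_log_append_v() grows on demand; with a NULL context everything is
 * redirected to dmesg via pr_info(). The loop body below is hypothetical.
 *
 *	struct dc_log_buffer_ctx log_ctx = { 0 };
 *
 *	dm_dtn_log_begin(dc->ctx, &log_ctx);
 *	for (i = 0; i < pipe_count; i++)
 *		dm_dtn_log_append_v(dc->ctx, &log_ctx,
 *				    "pipe %d: stream %p\n", i, streams[i]);
 *	dm_dtn_log_end(dc->ctx, &log_ctx);
 */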

bool dm_helpers_dp_mst_start_top_mgr(
		struct dc_context *ctx,
		const struct dc_link *link,
		bool boot)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	if (boot) {
		DRM_INFO("DM_MST: Deferring MST start on aconnector: %p [id: %d]\n",
					aconnector, aconnector->base.base.id);
		return true;
	}

	DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
			aconnector, aconnector->base.base.id);

	return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
}

void dm_helpers_dp_mst_stop_top_mgr(
		struct dc_context *ctx,
		const struct dc_link *link)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return;
	}

	DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
			aconnector, aconnector->base.base.id);

	if (aconnector->mst_mgr.mst_state)
		drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
}

bool dm_helpers_dp_read_dpcd(
		struct dc_context *ctx,
		const struct dc_link *link,
		uint32_t address,
		uint8_t *data,
		uint32_t size)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
			data, size) > 0;
}

bool dm_helpers_dp_write_dpcd(
		struct dc_context *ctx,
		const struct dc_link *link,
		uint32_t address,
		const uint8_t *data,
		uint32_t size)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
			address, (uint8_t *)data, size) > 0;
}

bool dm_helpers_submit_i2c(
		struct dc_context *ctx,
		const struct dc_link *link,
		struct i2c_command *cmd)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct i2c_msg *msgs;
	int i = 0;
	int num = cmd->number_of_payloads;
	bool result;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL);

	if (!msgs)
		return false;

	for (i = 0; i < num; i++) {
		msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD;
		msgs[i].addr = cmd->payloads[i].address;
		msgs[i].len = cmd->payloads[i].length;
		msgs[i].buf = cmd->payloads[i].data;
	}

	result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;

	kfree(msgs);

	return result;
}

bool dm_helpers_dp_write_dsc_enable(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		bool enable)
{
	uint8_t enable_dsc = enable ? 1 : 0;
	struct amdgpu_dm_connector *aconnector;

	if (!stream)
		return false;

	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (!aconnector->dsc_aux)
			return false;

		return (drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1) >= 0);
	}

	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
		return dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);

	return false;
}
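
/*
 * Illustrative sketch only: how a caller might use the helper above to toggle
 * DSC for a stream. For MST streams the write goes through the per-port
 * dsc_aux (the DSC-capable branch or sink); for SST it goes through the
 * link's own DPCD helper. Both end up writing DP_DSC_ENABLE. The condition
 * shown is hypothetical.
 *
 *	if (stream_needs_dsc &&
 *	    !dm_helpers_dp_write_dsc_enable(dc->ctx, stream, true))
 *		DRM_DEBUG_DRIVER("Failed to enable DSC on the sink\n");
 */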

bool dm_helpers_is_dp_sink_present(struct dc_link *link)
{
	bool dp_sink_present;
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		BUG_ON("Failed to find connector for link!");
		return true;
	}

	mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex);
	dp_sink_present = dc_link_is_dp_sink_present(link);
	mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex);
	return dp_sink_present;
}

enum dc_edid_status dm_helpers_read_local_edid(
		struct dc_context *ctx,
		struct dc_link *link,
		struct dc_sink *sink)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct i2c_adapter *ddc;
	int retry = 3;
	enum dc_edid_status edid_status;
	struct edid *edid;

	if (link->aux_mode)
		ddc = &aconnector->dm_dp_aux.aux.ddc;
	else
		ddc = &aconnector->i2c->base;

	/* Some dongles read the EDID incorrectly the first time, so verify
	 * the checksum and retry to make sure a correct EDID is read.
	 */
	do {
		edid = drm_get_edid(&aconnector->base, ddc);

		if (!edid)
			return EDID_NO_RESPONSE;

		sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
		memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);

		/* We don't need the original edid anymore */
		kfree(edid);

		edid_status = dm_helpers_parse_edid_caps(
						ctx,
						&sink->dc_edid,
						&sink->edid_caps);

	} while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);

	if (edid_status != EDID_OK)
		DRM_ERROR("EDID err: %d, on connector: %s",
				edid_status,
				aconnector->base.name);

	/* DP compliance: if the sink requested an EDID read test, write back
	 * the checksum of the last EDID block and acknowledge the request. */
	if (link->aux_mode) {
		union test_request test_request = { {0} };
		union test_response test_response = { {0} };

		dm_helpers_dp_read_dpcd(ctx,
					link,
					DP_TEST_REQUEST,
					&test_request.raw,
					sizeof(union test_request));

		if (!test_request.bits.EDID_READ)
			return edid_status;

		test_response.bits.EDID_CHECKSUM_WRITE = 1;

		dm_helpers_dp_write_dpcd(ctx,
					link,
					DP_TEST_EDID_CHECKSUM,
					&sink->dc_edid.raw_edid[sink->dc_edid.length-1],
					1);

		dm_helpers_dp_write_dpcd(ctx,
					link,
					DP_TEST_RESPONSE,
					&test_response.raw,
					sizeof(test_response));
	}

	return edid_status;
}

void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
{
	/* TODO: something */
}