/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/string.h>
#include <linux/acpi.h>
#include <linux/version.h>
#include <linux/i2c.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_edid.h>

#include "dm_services.h"
#include "amdgpu.h"
#include "dc.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"

#include "dm_helpers.h"

/* dm_helpers_parse_edid_caps
 *
 * Parse EDID capabilities
 *
 * @edid:	[in] pointer to edid
 * @edid_caps:	[out] pointer to edid caps
 * @return
 *	enum dc_edid_status
 */
enum dc_edid_status dm_helpers_parse_edid_caps(
		struct dc_context *ctx,
		const struct dc_edid *edid,
		struct dc_edid_caps *edid_caps)
{
	struct edid *edid_buf = (struct edid *) edid->raw_edid;
	struct cea_sad *sads;
	int sad_count = -1;
	int sadb_count = -1;
	int i = 0;
	int j = 0;
	uint8_t *sadb = NULL;

	enum dc_edid_status result = EDID_OK;

	if (!edid_caps || !edid)
		return EDID_BAD_INPUT;

	if (!drm_edid_is_valid(edid_buf))
		result = EDID_BAD_CHECKSUM;

	edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
			((uint16_t) edid_buf->mfg_id[1])<<8;
	edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
			((uint16_t) edid_buf->prod_code[1])<<8;
	edid_caps->serial_number = edid_buf->serial;
	edid_caps->manufacture_week = edid_buf->mfg_week;
	edid_caps->manufacture_year = edid_buf->mfg_year;

	/* One of the four detailed_timings stores the monitor name. It's
	 * stored in an array of length 13. */
	for (i = 0; i < 4; i++) {
		if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) {
			while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) {
				if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n')
					break;

				edid_caps->display_name[j] =
					edid_buf->detailed_timings[i].data.other_data.data.str.str[j];
				j++;
			}
		}
	}

	edid_caps->edid_hdmi = drm_detect_hdmi_monitor(
			(struct edid *) edid->raw_edid);

	sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
	if (sad_count <= 0) {
		DRM_INFO("SADs count is: %d, don't need to read it\n",
				sad_count);
		return result;
	}

	edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ?
			sad_count : DC_MAX_AUDIO_DESC_COUNT;

	for (i = 0; i < edid_caps->audio_mode_count; ++i) {
		struct cea_sad *sad = &sads[i];

		edid_caps->audio_modes[i].format_code = sad->format;
		edid_caps->audio_modes[i].channel_count = sad->channels + 1;
		edid_caps->audio_modes[i].sample_rate = sad->freq;
		edid_caps->audio_modes[i].sample_size = sad->byte2;
	}

	sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);

	if (sadb_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
		sadb_count = 0;
	}

	if (sadb_count)
		edid_caps->speaker_flags = sadb[0];
	else
		edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;

	kfree(sads);
	kfree(sadb);

	return result;
}

/* Copy the active VC payload allocations tracked by the DRM MST topology
 * manager into the dc stream allocation table. */
static void get_payload_table(
		struct amdgpu_dm_connector *aconnector,
		struct dp_mst_stream_allocation_table *proposed_table)
{
	int i;
	struct drm_dp_mst_topology_mgr *mst_mgr =
			&aconnector->mst_port->mst_mgr;

	mutex_lock(&mst_mgr->payload_lock);

	proposed_table->stream_count = 0;

	/* number of active streams */
	for (i = 0; i < mst_mgr->max_payloads; i++) {
		if (mst_mgr->payloads[i].num_slots == 0)
			break; /* end of vcp_id table */

		ASSERT(mst_mgr->payloads[i].payload_state !=
				DP_PAYLOAD_DELETE_LOCAL);

		if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL ||
			mst_mgr->payloads[i].payload_state ==
					DP_PAYLOAD_REMOTE) {

			struct dp_mst_stream_allocation *sa =
					&proposed_table->stream_allocations[
						proposed_table->stream_count];

			sa->slot_count = mst_mgr->payloads[i].num_slots;
			sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi;
			proposed_table->stream_count++;
		}
	}

	mutex_unlock(&mst_mgr->payload_lock);
}

/*
 * Writes the payload allocation table to the immediate downstream device.
 */
bool dm_helpers_dp_mst_write_payload_allocation_table(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		struct dp_mst_stream_allocation_table *proposed_table,
		bool enable)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	int slots = 0;
	bool ret;
	int clock;
	int bpp = 0;
	int pbn = 0;

	aconnector = stream->sink->priv;

	if (!aconnector || !aconnector->mst_port)
		return false;

	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!mst_mgr->mst_state)
		return false;

	mst_port = aconnector->port;

	if (enable) {
		clock = stream->timing.pix_clk_khz;

		switch (stream->timing.display_color_depth) {

		case COLOR_DEPTH_666:
			bpp = 6;
			break;
		case COLOR_DEPTH_888:
			bpp = 8;
			break;
		case COLOR_DEPTH_101010:
			bpp = 10;
			break;
		case COLOR_DEPTH_121212:
			bpp = 12;
			break;
		case COLOR_DEPTH_141414:
			bpp = 14;
			break;
		case COLOR_DEPTH_161616:
			bpp = 16;
			break;
		default:
			ASSERT(bpp != 0);
			break;
		}

		bpp = bpp * 3;

		/* TODO need to know link rate */

		pbn = drm_dp_calc_pbn_mode(clock, bpp);

		slots = drm_dp_find_vcpi_slots(mst_mgr, pbn);
		ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, pbn, slots);

		if (!ret)
			return false;

	} else {
		drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
	}

	ret = drm_dp_update_payload_part1(mst_mgr);

	/* mst_mgr->payloads hold the VC payloads used to notify the MST
	 * branch, via DPCD or AUX message, of the slot (1-63) allocation
	 * sequence for each stream. The AMD ASIC stream slot allocation
	 * should follow the same sequence, so copy the DRM MST allocation
	 * to dc. */

	get_payload_table(aconnector, proposed_table);

	if (ret)
		return false;

	return true;
}


/*
 * Clears the payload allocation table before enabling the MST DP link.
 */
void dm_helpers_dp_mst_clear_payload_allocation_table(
		struct dc_context *ctx,
		const struct dc_link *link)
{}

/*
 * Polls for ACT (allocation change trigger) handled and sends
 * ALLOCATE_PAYLOAD message.
 */
bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
		struct dc_context *ctx,
		const struct dc_stream_state *stream)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	int ret;

	aconnector = stream->sink->priv;

	if (!aconnector || !aconnector->mst_port)
		return false;

	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!mst_mgr->mst_state)
		return false;

	ret = drm_dp_check_act_status(mst_mgr);

	if (ret)
		return false;

	return true;
}

/* Sends payload update part 2 and, when disabling the stream, releases its
 * VCPI. */
bool dm_helpers_dp_mst_send_payload_allocation(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		bool enable)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	int ret;

	aconnector = stream->sink->priv;

	if (!aconnector || !aconnector->mst_port)
		return false;

	mst_port = aconnector->port;

	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!mst_mgr->mst_state)
		return false;

	ret = drm_dp_update_payload_part2(mst_mgr);

	if (ret)
		return false;

	if (!enable)
		drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);

	return true;
}

bool dm_helpers_dc_conn_log(struct dc_context *ctx, struct log_entry *entry, enum dc_log_type event)
{
	return true;
}

void dm_dtn_log_begin(struct dc_context *ctx)
{}

void dm_dtn_log_append_v(struct dc_context *ctx,
		const char *pMsg, ...)
{}

void dm_dtn_log_end(struct dc_context *ctx)
{}

bool dm_helpers_dp_mst_start_top_mgr(
		struct dc_context *ctx,
		const struct dc_link *link,
		bool boot)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	if (boot) {
		DRM_INFO("DM_MST: Deferring MST start on aconnector: %p [id: %d]\n",
				aconnector, aconnector->base.base.id);
		return true;
	}

	DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
			aconnector, aconnector->base.base.id);

	return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
}

void dm_helpers_dp_mst_stop_top_mgr(
		struct dc_context *ctx,
		const struct dc_link *link)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return;
	}

	DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
			aconnector, aconnector->base.base.id);

	if (aconnector->mst_mgr.mst_state == true)
		drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
}

bool dm_helpers_dp_read_dpcd(
		struct dc_context *ctx,
		const struct dc_link *link,
		uint32_t address,
		uint8_t *data,
		uint32_t size)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
			data, size) > 0;
}

bool dm_helpers_dp_write_dpcd(
		struct dc_context *ctx,
		const struct dc_link *link,
		uint32_t address,
		const uint8_t *data,
		uint32_t size)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
			address, (uint8_t *)data, size) > 0;
}

bool dm_helpers_submit_i2c(
		struct dc_context *ctx,
		const struct dc_link *link,
		struct i2c_command *cmd)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct i2c_msg *msgs;
	int i = 0;
	int num = cmd->number_of_payloads;
	bool result;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	msgs = kzalloc(num * sizeof(struct i2c_msg), GFP_KERNEL);

	if (!msgs)
		return false;

	for (i = 0; i < num; i++) {
		msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD;
		msgs[i].addr = cmd->payloads[i].address;
		msgs[i].len = cmd->payloads[i].length;
		msgs[i].buf = cmd->payloads[i].data;
	}

	result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;

	kfree(msgs);

	return result;
}

enum dc_edid_status dm_helpers_read_local_edid(
		struct dc_context *ctx,
		struct dc_link *link,
		struct dc_sink *sink)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct i2c_adapter *ddc;
	int retry = 3;
	enum dc_edid_status edid_status;
	struct edid *edid;

	if (link->aux_mode)
		ddc = &aconnector->dm_dp_aux.aux.ddc;
	else
		ddc = &aconnector->i2c->base;

	/* Some dongles read the EDID incorrectly the first time, so verify
	 * the checksum and retry to make sure a correct EDID is read.
	 */
	do {

		edid = drm_get_edid(&aconnector->base, ddc);

		if (!edid)
			return EDID_NO_RESPONSE;

		sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
		memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);

		/* We don't need the original edid anymore */
		kfree(edid);

		edid_status = dm_helpers_parse_edid_caps(
						ctx,
						&sink->dc_edid,
						&sink->edid_caps);

	} while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);

	if (edid_status != EDID_OK)
		DRM_ERROR("EDID err: %d, on connector: %s",
				edid_status,
				aconnector->base.name);

	return edid_status;
}

void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
{
	/* TODO: something */
}