1 /* 2 * Copyright 2018 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: AMD 23 * 24 */ 25 26 #include <linux/string_helpers.h> 27 #include <linux/uaccess.h> 28 29 #include "dc.h" 30 #include "amdgpu.h" 31 #include "amdgpu_dm.h" 32 #include "amdgpu_dm_debugfs.h" 33 #include "amdgpu_dm_replay.h" 34 #include "dm_helpers.h" 35 #include "dmub/dmub_srv.h" 36 #include "resource.h" 37 #include "dsc.h" 38 #include "link_hwss.h" 39 #include "dc/dc_dmub_srv.h" 40 #include "link/protocols/link_dp_capability.h" 41 #include "inc/hw/dchubbub.h" 42 43 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY 44 #include "amdgpu_dm_psr.h" 45 #endif 46 47 struct dmub_debugfs_trace_header { 48 uint32_t entry_count; 49 uint32_t reserved[3]; 50 }; 51 52 struct dmub_debugfs_trace_entry { 53 uint32_t trace_code; 54 uint32_t tick_count; 55 uint32_t param0; 56 uint32_t param1; 57 }; 58 59 static const char *const mst_progress_status[] = { 60 "probe", 61 "remote_edid", 62 "allocate_new_payload", 63 "clear_allocated_payload", 64 }; 65 66 /* parse_write_buffer_into_params - Helper function to parse debugfs write buffer into an array 67 * 68 * Function takes in attributes passed to debugfs write entry 69 * and writes into param array. 70 * The user passes max_param_num to identify maximum number of 71 * parameters that could be parsed. 72 * 73 */ 74 static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size, 75 long *param, const char __user *buf, 76 int max_param_num, 77 uint8_t *param_nums) 78 { 79 char *wr_buf_ptr = NULL; 80 uint32_t wr_buf_count = 0; 81 int r; 82 char *sub_str = NULL; 83 const char delimiter[3] = {' ', '\n', '\0'}; 84 uint8_t param_index = 0; 85 86 *param_nums = 0; 87 88 wr_buf_ptr = wr_buf; 89 90 /* r is bytes not be copied */ 91 if (copy_from_user(wr_buf_ptr, buf, wr_buf_size)) { 92 DRM_DEBUG_DRIVER("user data could not be read successfully\n"); 93 return -EFAULT; 94 } 95 96 /* check number of parameters. 
	 * isspace() cannot distinguish a space from a '\n'.
	 */
	while ((*wr_buf_ptr != 0xa) && (wr_buf_count < wr_buf_size)) {
		/* skip space */
		while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
			wr_buf_ptr++;
			wr_buf_count++;
		}

		if (wr_buf_count == wr_buf_size)
			break;

		/* skip non-space */
		while ((!isspace(*wr_buf_ptr)) && (wr_buf_count < wr_buf_size)) {
			wr_buf_ptr++;
			wr_buf_count++;
		}

		(*param_nums)++;

		if (wr_buf_count == wr_buf_size)
			break;
	}

	if (*param_nums > max_param_num)
		*param_nums = max_param_num;

	wr_buf_ptr = wr_buf; /* reset buf pointer */
	wr_buf_count = 0; /* number of chars already checked */

	while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
		wr_buf_ptr++;
		wr_buf_count++;
	}

	while (param_index < *param_nums) {
		/* after strsep, wr_buf_ptr will be moved to after space */
		sub_str = strsep(&wr_buf_ptr, delimiter);

		r = kstrtol(sub_str, 16, &(param[param_index]));

		if (r)
			DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);

		param_index++;
	}

	return 0;
}

/* function description
 * get/set DP configuration: lane_count, link_rate, spread_spectrum
 *
 * valid lane count values: 1, 2, 4
 * valid link rate values:
 * 06h = 1.62 Gbps per lane
 * 0Ah = 2.7 Gbps per lane
 * 0Ch = 3.24 Gbps per lane
 * 14h = 5.4 Gbps per lane
 * 1Eh = 8.1 Gbps per lane
 *
 * debugfs is located at /sys/kernel/debug/dri/0/DP-x/link_settings
 *
 * --- to get the DP configuration
 *
 * cat /sys/kernel/debug/dri/0/DP-x/link_settings
 *
 * It lists the current, verified, reported and preferred DP configurations.
 * current   --- settings for the current video mode
 * verified  --- maximum configuration that passed link training
 * reported  --- capabilities reported by the DP RX (DPCD registers 0, 1, 2)
 * preferred --- user-forced settings
 *
 * --- to set (or force) the DP configuration
 *
 * echo <lane_count> <link_rate> > link_settings
 *
 * for example, to force 4 lanes at 2.7 Gbps per lane:
 * echo 4 0xa > /sys/kernel/debug/dri/0/DP-x/link_settings
 *
 * spread_spectrum cannot be changed dynamically.
 *
 * If an invalid lane count or link rate is forced, no HW programming is
 * done. Check the link settings after the force operation to see whether
 * the HW was actually programmed:
 *
 * cat /sys/kernel/debug/dri/0/DP-x/link_settings
 *
 * and compare the current and preferred settings.
 *
 */
static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
				     size_t size, loff_t *pos)
{
	struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
	struct dc_link *link = connector->dc_link;
	char *rd_buf = NULL;
	char *rd_buf_ptr = NULL;
	const uint32_t rd_buf_size = 100;
	uint32_t result = 0;
	uint8_t str_len = 0;
	int r;

	if (*pos & 3 || size & 3)
		return -EINVAL;

	rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
	if (!rd_buf)
		return 0;

	rd_buf_ptr = rd_buf;

	str_len = strlen("Current: %d 0x%x %d ");
	snprintf(rd_buf_ptr, str_len, "Current: %d 0x%x %d ",
		 link->cur_link_settings.lane_count,
		 link->cur_link_settings.link_rate,
		 link->cur_link_settings.link_spread);
	rd_buf_ptr += str_len;

	str_len = strlen("Verified: %d 0x%x %d ");
	snprintf(rd_buf_ptr, str_len, "Verified: %d 0x%x %d ",
		 link->verified_link_cap.lane_count,
		 link->verified_link_cap.link_rate,
		 link->verified_link_cap.link_spread);
	rd_buf_ptr += str_len;

	str_len = strlen("Reported: %d 0x%x %d ");
	snprintf(rd_buf_ptr, str_len, "Reported: %d 0x%x %d ",
		 link->reported_link_cap.lane_count,
		 link->reported_link_cap.link_rate,
		 link->reported_link_cap.link_spread);
	rd_buf_ptr += str_len;

	str_len = strlen("Preferred: %d 0x%x %d ");
	snprintf(rd_buf_ptr, str_len, "Preferred: %d 0x%x %d\n",
		 link->preferred_link_setting.lane_count,
		 link->preferred_link_setting.link_rate,
		 link->preferred_link_setting.link_spread);

	while (size) {
		if (*pos >= rd_buf_size)
			break;

		r = put_user(*(rd_buf + result), buf);
		if (r) {
			kfree(rd_buf);
			return r; /* r = -EFAULT */
		}

		buf += 1;
		size -= 1;
		*pos += 1;
		result += 1;
	}

	kfree(rd_buf);
	return result;
}

static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
				      size_t size, loff_t *pos)
{
	struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
	struct dc_link *link = connector->dc_link;
	struct amdgpu_device *adev = drm_to_adev(connector->base.dev);
	struct dc *dc = (struct dc *)link->dc;
	struct dc_link_settings prefer_link_settings;
	char *wr_buf = NULL;
	const uint32_t wr_buf_size = 40;
	/* 0: lane_count; 1: link_rate */
	int max_param_num = 2;
	uint8_t param_nums = 0;
	long param[2];
	bool valid_input = true;

	if (size == 0)
		return -EINVAL;

	wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
	if (!wr_buf)
		return -ENOSPC;

	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
					   (long *)param, buf,
					   max_param_num,
					   &param_nums)) {
		kfree(wr_buf);
		return -EINVAL;
	}

	if (param_nums <= 0) {
		kfree(wr_buf);
		DRM_DEBUG_DRIVER("user data could not be read\n");
		return -EINVAL;
	}

	switch (param[0]) {
	case LANE_COUNT_ONE:
	case LANE_COUNT_TWO:
	case LANE_COUNT_FOUR:
		break;
	default:
		valid_input = false;
		break;
	}

	switch (param[1]) {
	case LINK_RATE_LOW:
	case LINK_RATE_HIGH:
	case LINK_RATE_RBR2:
	case LINK_RATE_HIGH2:
	case LINK_RATE_HIGH3:
	case LINK_RATE_UHBR10:
	case LINK_RATE_UHBR13_5:
	case LINK_RATE_UHBR20:
		break;
	default:
		valid_input = false;
		break;
	}

	if (!valid_input) {
		kfree(wr_buf);
		DRM_DEBUG_DRIVER("Invalid Input value. No HW will be programmed\n");
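		/* Calling dc_link_set_preferred_training_settings() with NULL
		 * settings appears to clear any previously forced values so
		 * the link reverts to driver defaults on the next retrain.
		 */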
		mutex_lock(&adev->dm.dc_lock);
		dc_link_set_preferred_training_settings(dc, NULL, NULL, link, false);
		mutex_unlock(&adev->dm.dc_lock);
		return size;
	}

	/* save user force lane_count, link_rate to preferred settings
	 * spread spectrum will not be changed
	 */
	prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
	prefer_link_settings.use_link_rate_set = false;
	prefer_link_settings.lane_count = param[0];
	prefer_link_settings.link_rate = param[1];

	mutex_lock(&adev->dm.dc_lock);
	dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, false);
	mutex_unlock(&adev->dm.dc_lock);

	kfree(wr_buf);
	return size;
}

static bool dp_mst_is_end_device(struct amdgpu_dm_connector *aconnector)
{
	bool is_end_device = false;
	struct drm_dp_mst_topology_mgr *mgr = NULL;
	struct drm_dp_mst_port *port = NULL;

	if (aconnector->mst_root && aconnector->mst_root->mst_mgr.mst_state) {
		mgr = &aconnector->mst_root->mst_mgr;
		port = aconnector->mst_output_port;

		drm_modeset_lock(&mgr->base.lock, NULL);
		if (port->pdt == DP_PEER_DEVICE_SST_SINK ||
		    port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV)
			is_end_device = true;
		drm_modeset_unlock(&mgr->base.lock);
	}

	return is_end_device;
}

/* Change MST link setting
 *
 * valid lane count values: 1, 2, 4
 * valid link rate values:
 * 06h  = 1.62 Gbps per lane
 * 0Ah  = 2.7 Gbps per lane
 * 0Ch  = 3.24 Gbps per lane
 * 14h  = 5.4 Gbps per lane
 * 1Eh  = 8.1 Gbps per lane
 * 3E8h = 10.0 Gbps per lane
 * 546h = 13.5 Gbps per lane
 * 7D0h = 20.0 Gbps per lane
 *
 * debugfs is located at /sys/kernel/debug/dri/0/DP-x/mst_link_settings
 *
 * for example, to force 2 lanes at 10.0 Gbps per lane:
 * echo 2 0x3e8 > /sys/kernel/debug/dri/0/DP-x/mst_link_settings
 *
 * A valid input triggers a hotplug event so the new link setting is applied.
 * An invalid input resets the preferred training settings.
 *
 * See the link_settings entry for general usage.
 *
 */
static ssize_t dp_mst_link_setting(struct file *f, const char __user *buf,
				   size_t size, loff_t *pos)
{
	struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
	struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
	struct dc *dc = (struct dc *)link->dc;
	struct dc_link_settings prefer_link_settings;
	char *wr_buf = NULL;
	const uint32_t wr_buf_size = 40;
	/* 0: lane_count; 1: link_rate */
	int max_param_num = 2;
	uint8_t param_nums = 0;
	long param[2];
	bool valid_input = true;

	if (!dp_mst_is_end_device(aconnector))
		return -EINVAL;

	if (size == 0)
		return -EINVAL;

	wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
	if (!wr_buf)
		return -ENOSPC;

	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
					   (long *)param, buf,
					   max_param_num,
					   &param_nums)) {
		kfree(wr_buf);
		return -EINVAL;
	}

	if (param_nums <= 0) {
		kfree(wr_buf);
		DRM_DEBUG_DRIVER("user data could not be read\n");
		return -EINVAL;
	}

	switch (param[0]) {
	case LANE_COUNT_ONE:
	case LANE_COUNT_TWO:
	case LANE_COUNT_FOUR:
		break;
	default:
		valid_input = false;
		break;
	}

	switch (param[1]) {
	case LINK_RATE_LOW:
	case LINK_RATE_HIGH:
	case LINK_RATE_RBR2:
	case LINK_RATE_HIGH2:
	case LINK_RATE_HIGH3:
	case LINK_RATE_UHBR10:
	case LINK_RATE_UHBR13_5:
	case LINK_RATE_UHBR20:
		break;
	default:
		valid_input = false;
		break;
	}

	if (!valid_input) {
		kfree(wr_buf);
		DRM_DEBUG_DRIVER("Invalid Input value. No HW will be programmed\n");
		mutex_lock(&adev->dm.dc_lock);
		dc_link_set_preferred_training_settings(dc, NULL, NULL, link, false);
		mutex_unlock(&adev->dm.dc_lock);
		return -EINVAL;
	}

	/* save user force lane_count, link_rate to preferred settings
	 * spread spectrum will not be changed
	 */
	prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
	prefer_link_settings.use_link_rate_set = false;
	prefer_link_settings.lane_count = param[0];
	prefer_link_settings.link_rate = param[1];

	/* skip immediate retrain, and train to new link setting after hotplug event triggered */
	mutex_lock(&adev->dm.dc_lock);
	dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true);
	mutex_unlock(&adev->dm.dc_lock);

	mutex_lock(&aconnector->base.dev->mode_config.mutex);
	aconnector->base.force = DRM_FORCE_OFF;
	mutex_unlock(&aconnector->base.dev->mode_config.mutex);
	drm_kms_helper_hotplug_event(aconnector->base.dev);

	msleep(100);

	mutex_lock(&aconnector->base.dev->mode_config.mutex);
	aconnector->base.force = DRM_FORCE_UNSPECIFIED;
	mutex_unlock(&aconnector->base.dev->mode_config.mutex);
	drm_kms_helper_hotplug_event(aconnector->base.dev);

	kfree(wr_buf);
	return size;
}

/* function: get current DP PHY settings: voltage swing, pre-emphasis,
 * post-cursor2 (defined by the VESA DP specification)
 *
 * valid values
 * voltage swing: 0, 1, 2, 3
 * pre-emphasis : 0, 1, 2, 3
 * post cursor2 : 0, 1, 2, 3
 *
 *
 * how to use this debugfs
 *
 * debugfs is located at /sys/kernel/debug/dri/0/DP-x
 *
 * there will be directories like DP-1, DP-2, DP-3, etc. for each DP display
 *
 * To figure out which DP-x corresponds to the display under test:
 * cd DP-x
 * ls -ll
 * There should be debugfs files such as link_settings and phy_settings.
 * cat link_settings
 * and use lane_count and link_rate to identify which DP-x belongs to the
 * display being worked on.
 *
 * To get the current DP PHY settings:
 * cat phy_settings
 *
 * To change the DP PHY settings:
 * echo <voltage_swing> <pre-emphasis> <post_cursor2> > phy_settings
 * for example, to change voltage swing to 2, pre-emphasis to 3 and
 * post_cursor2 to 0:
 * echo 2 3 0 > phy_settings
 *
 * To check whether the change was applied, read back the current settings:
 * cat phy_settings
 *
 * In case invalid values are set by the user, like
 * echo 1 4 0 > phy_settings
 *
 * the HW will NOT be programmed with these settings.
 * cat phy_settings will show the previous valid settings.
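 *
 * Note: the same three values are applied to every active lane; this
 * interface does not provide per-lane control.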
 */
static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
	struct dc_link *link = connector->dc_link;
	char *rd_buf = NULL;
	const uint32_t rd_buf_size = 20;
	uint32_t result = 0;
	int r;

	if (*pos & 3 || size & 3)
		return -EINVAL;

	rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
	if (!rd_buf)
		return -EINVAL;

	snprintf(rd_buf, rd_buf_size, " %d %d %d\n",
		 link->cur_lane_setting[0].VOLTAGE_SWING,
		 link->cur_lane_setting[0].PRE_EMPHASIS,
		 link->cur_lane_setting[0].POST_CURSOR2);

	while (size) {
		if (*pos >= rd_buf_size)
			break;

		r = put_user((*(rd_buf + result)), buf);
		if (r) {
			kfree(rd_buf);
			return r; /* r = -EFAULT */
		}

		buf += 1;
		size -= 1;
		*pos += 1;
		result += 1;
	}

	kfree(rd_buf);
	return result;
}

static int dp_lttpr_status_show(struct seq_file *m, void *unused)
{
	struct drm_connector *connector = m->private;
	struct amdgpu_dm_connector *aconnector =
		to_amdgpu_dm_connector(connector);
	struct dc_lttpr_caps caps = aconnector->dc_link->dpcd_caps.lttpr_caps;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "phy repeater count: %u (raw: 0x%x)\n",
		   dp_parse_lttpr_repeater_count(caps.phy_repeater_cnt),
		   caps.phy_repeater_cnt);

	seq_puts(m, "phy repeater mode: ");

	switch (caps.mode) {
	case DP_PHY_REPEATER_MODE_TRANSPARENT:
		seq_puts(m, "transparent");
		break;
	case DP_PHY_REPEATER_MODE_NON_TRANSPARENT:
		seq_puts(m, "non-transparent");
		break;
	case 0x00:
		seq_puts(m, "non lttpr");
		break;
	default:
		seq_printf(m, "read error (raw: 0x%x)", caps.mode);
		break;
	}

	seq_puts(m, "\n");
	return 0;
}

static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
				     size_t size, loff_t *pos)
{
	struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
	struct dc_link *link = connector->dc_link;
	struct dc *dc = (struct dc *)link->dc;
	char *wr_buf = NULL;
	uint32_t wr_buf_size = 40;
	long param[3];
	bool use_prefer_link_setting;
	struct link_training_settings link_lane_settings;
	int max_param_num = 3;
	uint8_t param_nums = 0;
	int r = 0;

	if (size == 0)
		return -EINVAL;

	wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
	if (!wr_buf)
		return -ENOSPC;

	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
					   (long *)param, buf,
					   max_param_num,
					   &param_nums)) {
		kfree(wr_buf);
		return -EINVAL;
	}

	if (param_nums <= 0) {
		kfree(wr_buf);
		DRM_DEBUG_DRIVER("user data could not be read\n");
		return -EINVAL;
	}

	if ((param[0] > VOLTAGE_SWING_MAX_LEVEL) ||
	    (param[1] > PRE_EMPHASIS_MAX_LEVEL) ||
	    (param[2] > POST_CURSOR2_MAX_LEVEL)) {
		kfree(wr_buf);
		DRM_DEBUG_DRIVER("Invalid Input. No HW will be programmed\n");
		return size;
	}

	/* get link settings: lane count, link rate */
	use_prefer_link_setting =
		((link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) &&
		 (link->test_pattern_enabled));

	memset(&link_lane_settings, 0, sizeof(link_lane_settings));

	if (use_prefer_link_setting) {
		link_lane_settings.link_settings.lane_count =
				link->preferred_link_setting.lane_count;
		link_lane_settings.link_settings.link_rate =
				link->preferred_link_setting.link_rate;
		link_lane_settings.link_settings.link_spread =
				link->preferred_link_setting.link_spread;
	} else {
		link_lane_settings.link_settings.lane_count =
				link->cur_link_settings.lane_count;
		link_lane_settings.link_settings.link_rate =
				link->cur_link_settings.link_rate;
		link_lane_settings.link_settings.link_spread =
				link->cur_link_settings.link_spread;
	}

	/* apply phy settings from user */
	for (r = 0; r < link_lane_settings.link_settings.lane_count; r++) {
		link_lane_settings.hw_lane_settings[r].VOLTAGE_SWING =
				(enum dc_voltage_swing)(param[0]);
		link_lane_settings.hw_lane_settings[r].PRE_EMPHASIS =
				(enum dc_pre_emphasis)(param[1]);
		link_lane_settings.hw_lane_settings[r].POST_CURSOR2 =
				(enum dc_post_cursor2)(param[2]);
	}

	/* program ASIC registers and DPCD registers */
	dc_link_set_drive_settings(dc, &link_lane_settings, link);

	kfree(wr_buf);
	return size;
}

/* function description
 *
 * set PHY layer or Link layer test pattern
 * PHY test pattern is used for PHY SI check.
 * Link layer test will not affect PHY SI.
 *
 * Reset Test Pattern:
 * 0 = DP_TEST_PATTERN_VIDEO_MODE
 *
 * PHY test patterns supported:
 * 1 = DP_TEST_PATTERN_D102
 * 2 = DP_TEST_PATTERN_SYMBOL_ERROR
 * 3 = DP_TEST_PATTERN_PRBS7
 * 4 = DP_TEST_PATTERN_80BIT_CUSTOM
 * 5 = DP_TEST_PATTERN_CP2520_1
 * 6 = DP_TEST_PATTERN_CP2520_2 = DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE
 * 7 = DP_TEST_PATTERN_CP2520_3
 *
 * DP PHY Link Training Patterns
 * 8 = DP_TEST_PATTERN_TRAINING_PATTERN1
 * 9 = DP_TEST_PATTERN_TRAINING_PATTERN2
 * a = DP_TEST_PATTERN_TRAINING_PATTERN3
 * b = DP_TEST_PATTERN_TRAINING_PATTERN4
 *
 * DP Link Layer Test patterns
 * c  = DP_TEST_PATTERN_COLOR_SQUARES
 * d  = DP_TEST_PATTERN_COLOR_SQUARES_CEA
 * e  = DP_TEST_PATTERN_VERTICAL_BARS
 * f  = DP_TEST_PATTERN_HORIZONTAL_BARS
 * 10 = DP_TEST_PATTERN_COLOR_RAMP
 *
 * debugfs phy_test_pattern is located at /sys/kernel/debug/dri/0/DP-x
 *
 * --- set test pattern
 * echo <test pattern #> > test_pattern
 *
 * If the test pattern # is not supported, NO HW programming will be done.
 * DP_TEST_PATTERN_80BIT_CUSTOM needs an extra 10 bytes of data for the
 * user pattern. The 10 input data bytes are separated by spaces:
 *
 * echo 0x4 0x11 0x22 0x33 0x44 0x55 0x66 0x77 0x88 0x99 0xaa > test_pattern
 *
 * --- reset test pattern
 * echo 0 > test_pattern
 *
 * --- HPD detection is disabled when a PHY test pattern is set
 *
 * when a PHY test pattern (pattern # within [1,7]) is set, the HPD pin of
 * the HW ASIC is disabled. The user can unplug the DP display from the DP
 * connector and plug in a scope to check the test pattern PHY SI.
 * If there is a need to unplug the scope and plug the DP display back in,
 * do the steps below:
 * echo 0 > phy_test_pattern
 * unplug scope
 * plug DP display.
 *
 * "echo 0 > phy_test_pattern" will re-enable the HPD pin again so that the
 * video sw driver can detect "unplug scope" and "plug DP display".
 */
static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __user *buf,
						 size_t size, loff_t *pos)
{
	struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
	struct dc_link *link = connector->dc_link;
	char *wr_buf = NULL;
	uint32_t wr_buf_size = 100;
	long param[11] = {0x0};
	int max_param_num = 11;
	enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
	bool disable_hpd = false;
	bool valid_test_pattern = false;
	uint8_t param_nums = 0;
	/* init with default 80bit custom pattern */
	uint8_t custom_pattern[10] = {
			0x1f, 0x7c, 0xf0, 0xc1, 0x07,
			0x1f, 0x7c, 0xf0, 0xc1, 0x07
			};
	struct dc_link_settings prefer_link_settings = {LANE_COUNT_UNKNOWN,
			LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
	struct dc_link_settings cur_link_settings = {LANE_COUNT_UNKNOWN,
			LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
	struct link_training_settings link_training_settings;
	int i;

	if (size == 0)
		return -EINVAL;

	wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
	if (!wr_buf)
		return -ENOSPC;

	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
					   (long *)param, buf,
					   max_param_num,
					   &param_nums)) {
		kfree(wr_buf);
		return -EINVAL;
	}

	if (param_nums <= 0) {
		kfree(wr_buf);
		DRM_DEBUG_DRIVER("user data could not be read\n");
		return -EINVAL;
	}

	test_pattern = param[0];

	switch (test_pattern) {
	case DP_TEST_PATTERN_VIDEO_MODE:
	case DP_TEST_PATTERN_COLOR_SQUARES:
	case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
	case DP_TEST_PATTERN_VERTICAL_BARS:
	case DP_TEST_PATTERN_HORIZONTAL_BARS:
	case DP_TEST_PATTERN_COLOR_RAMP:
		valid_test_pattern = true;
		break;

	case DP_TEST_PATTERN_D102:
	case DP_TEST_PATTERN_SYMBOL_ERROR:
	case DP_TEST_PATTERN_PRBS7:
	case DP_TEST_PATTERN_80BIT_CUSTOM:
	case DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE:
	case DP_TEST_PATTERN_TRAINING_PATTERN4:
		disable_hpd = true;
		valid_test_pattern = true;
		break;

	default:
		valid_test_pattern = false;
		test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
		break;
	}

	if (!valid_test_pattern) {
		kfree(wr_buf);
		DRM_DEBUG_DRIVER("Invalid Test Pattern Parameters\n");
		return size;
	}

	if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) {
		for (i = 0; i < 10; i++) {
			if ((uint8_t)param[i + 1] != 0x0)
				break;
		}

		if (i < 10) {
			/* not use default value */
			for (i = 0; i < 10; i++)
				custom_pattern[i] = (uint8_t)param[i + 1];
		}
	}

	/* Usage: set a DP physical test pattern using debugfs with a normal DP
	 * panel, then plug out the DP panel and connect a scope to measure.
	 * Normal video mode and test patterns generated from the CRTC are
	 * visible to the user, so do not disable HPD for them.
	 * Video Mode is also set to clear the test pattern, so enable HPD
	 * because it might have been disabled after a test pattern was set.
	 * AUX depends on HPD; this is sequence dependent, do not move!
850 */ 851 if (!disable_hpd) 852 dc_link_enable_hpd(link); 853 854 prefer_link_settings.lane_count = link->verified_link_cap.lane_count; 855 prefer_link_settings.link_rate = link->verified_link_cap.link_rate; 856 prefer_link_settings.link_spread = link->verified_link_cap.link_spread; 857 858 cur_link_settings.lane_count = link->cur_link_settings.lane_count; 859 cur_link_settings.link_rate = link->cur_link_settings.link_rate; 860 cur_link_settings.link_spread = link->cur_link_settings.link_spread; 861 862 link_training_settings.link_settings = cur_link_settings; 863 864 865 if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) { 866 if (prefer_link_settings.lane_count != LANE_COUNT_UNKNOWN && 867 prefer_link_settings.link_rate != LINK_RATE_UNKNOWN && 868 (prefer_link_settings.lane_count != cur_link_settings.lane_count || 869 prefer_link_settings.link_rate != cur_link_settings.link_rate)) 870 link_training_settings.link_settings = prefer_link_settings; 871 } 872 873 for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++) 874 link_training_settings.hw_lane_settings[i] = link->cur_lane_setting[i]; 875 876 dc_link_dp_set_test_pattern( 877 link, 878 test_pattern, 879 DP_TEST_PATTERN_COLOR_SPACE_RGB, 880 &link_training_settings, 881 custom_pattern, 882 10); 883 884 /* Usage: Set DP physical test pattern using AMDDP with normal DP panel 885 * Then plug out DP panel and connect a scope to measure DP PHY signal. 886 * Need disable interrupt to avoid SW driver disable DP output. This is 887 * done after the test pattern is set. 888 */ 889 if (valid_test_pattern && disable_hpd) 890 dc_link_disable_hpd(link); 891 892 kfree(wr_buf); 893 894 return size; 895 } 896 897 /* 898 * Returns the DMCUB tracebuffer contents. 899 * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_tracebuffer 900 */ 901 static int dmub_tracebuffer_show(struct seq_file *m, void *data) 902 { 903 struct amdgpu_device *adev = m->private; 904 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info; 905 struct dmub_debugfs_trace_entry *entries; 906 uint8_t *tbuf_base; 907 uint32_t tbuf_size, max_entries, num_entries, i; 908 909 if (!fb_info) 910 return 0; 911 912 tbuf_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr; 913 if (!tbuf_base) 914 return 0; 915 916 tbuf_size = fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size; 917 max_entries = (tbuf_size - sizeof(struct dmub_debugfs_trace_header)) / 918 sizeof(struct dmub_debugfs_trace_entry); 919 920 num_entries = 921 ((struct dmub_debugfs_trace_header *)tbuf_base)->entry_count; 922 923 num_entries = min(num_entries, max_entries); 924 925 entries = (struct dmub_debugfs_trace_entry 926 *)(tbuf_base + 927 sizeof(struct dmub_debugfs_trace_header)); 928 929 for (i = 0; i < num_entries; ++i) { 930 struct dmub_debugfs_trace_entry *entry = &entries[i]; 931 932 seq_printf(m, 933 "trace_code=%u tick_count=%u param0=%u param1=%u\n", 934 entry->trace_code, entry->tick_count, entry->param0, 935 entry->param1); 936 } 937 938 return 0; 939 } 940 941 /* 942 * Returns the DMCUB firmware state contents. 
943 * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_fw_state 944 */ 945 static int dmub_fw_state_show(struct seq_file *m, void *data) 946 { 947 struct amdgpu_device *adev = m->private; 948 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info; 949 uint8_t *state_base; 950 uint32_t state_size; 951 952 if (!fb_info) 953 return 0; 954 955 state_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr; 956 if (!state_base) 957 return 0; 958 959 state_size = fb_info->fb[DMUB_WINDOW_6_FW_STATE].size; 960 961 return seq_write(m, state_base, state_size); 962 } 963 964 /* replay_capability_show() - show eDP panel replay capability 965 * 966 * The read function: replay_capability_show 967 * Shows if sink and driver has Replay capability or not. 968 * 969 * cat /sys/kernel/debug/dri/0/eDP-X/replay_capability 970 * 971 * Expected output: 972 * "Sink support: no\n" - if panel doesn't support Replay 973 * "Sink support: yes\n" - if panel supports Replay 974 * "Driver support: no\n" - if driver doesn't support Replay 975 * "Driver support: yes\n" - if driver supports Replay 976 */ 977 static int replay_capability_show(struct seq_file *m, void *data) 978 { 979 struct drm_connector *connector = m->private; 980 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 981 struct dc_link *link = aconnector->dc_link; 982 bool sink_support_replay = false; 983 bool driver_support_replay = false; 984 985 if (!link) 986 return -ENODEV; 987 988 if (link->type == dc_connection_none) 989 return -ENODEV; 990 991 if (!(link->connector_signal & SIGNAL_TYPE_EDP)) 992 return -ENODEV; 993 994 /* If Replay is already set to support, skip the checks */ 995 if (link->replay_settings.config.replay_supported) { 996 sink_support_replay = true; 997 driver_support_replay = true; 998 } else if ((amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) { 999 sink_support_replay = amdgpu_dm_link_supports_replay(link, aconnector); 1000 } else { 1001 struct dc *dc = link->ctx->dc; 1002 1003 sink_support_replay = amdgpu_dm_link_supports_replay(link, aconnector); 1004 if (dc->ctx->dmub_srv && dc->ctx->dmub_srv->dmub) 1005 driver_support_replay = 1006 (bool)dc->ctx->dmub_srv->dmub->feature_caps.replay_supported; 1007 } 1008 1009 seq_printf(m, "Sink support: %s\n", str_yes_no(sink_support_replay)); 1010 seq_printf(m, "Driver support: %s\n", str_yes_no(driver_support_replay)); 1011 1012 return 0; 1013 } 1014 1015 /* psr_capability_show() - show eDP panel PSR capability 1016 * 1017 * The read function: sink_psr_capability_show 1018 * Shows if sink has PSR capability or not. 
1019 * If yes - the PSR version is appended 1020 * 1021 * cat /sys/kernel/debug/dri/0/eDP-X/psr_capability 1022 * 1023 * Expected output: 1024 * "Sink support: no\n" - if panel doesn't support PSR 1025 * "Sink support: yes [0x01]\n" - if panel supports PSR1 1026 * "Driver support: no\n" - if driver doesn't support PSR 1027 * "Driver support: yes [0x01]\n" - if driver supports PSR1 1028 */ 1029 static int psr_capability_show(struct seq_file *m, void *data) 1030 { 1031 struct drm_connector *connector = m->private; 1032 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 1033 struct dc_link *link = aconnector->dc_link; 1034 1035 if (!link) 1036 return -ENODEV; 1037 1038 if (link->type == dc_connection_none) 1039 return -ENODEV; 1040 1041 if (!(link->connector_signal & SIGNAL_TYPE_EDP)) 1042 return -ENODEV; 1043 1044 seq_printf(m, "Sink support: %s", str_yes_no(link->dpcd_caps.psr_info.psr_version != 0)); 1045 if (link->dpcd_caps.psr_info.psr_version) 1046 seq_printf(m, " [0x%02x]", link->dpcd_caps.psr_info.psr_version); 1047 seq_puts(m, "\n"); 1048 1049 seq_printf(m, "Driver support: %s", str_yes_no(link->psr_settings.psr_feature_enabled)); 1050 if (link->psr_settings.psr_version) 1051 seq_printf(m, " [0x%02x]", link->psr_settings.psr_version); 1052 seq_puts(m, "\n"); 1053 1054 return 0; 1055 } 1056 1057 /* 1058 * Returns the current bpc for the crtc. 1059 * Example usage: cat /sys/kernel/debug/dri/0/crtc-0/amdgpu_current_bpc 1060 */ 1061 static int amdgpu_current_bpc_show(struct seq_file *m, void *data) 1062 { 1063 struct drm_crtc *crtc = m->private; 1064 struct drm_device *dev = crtc->dev; 1065 struct dm_crtc_state *dm_crtc_state = NULL; 1066 int res = -ENODEV; 1067 unsigned int bpc; 1068 1069 mutex_lock(&dev->mode_config.mutex); 1070 drm_modeset_lock(&crtc->mutex, NULL); 1071 if (crtc->state == NULL) 1072 goto unlock; 1073 1074 dm_crtc_state = to_dm_crtc_state(crtc->state); 1075 if (dm_crtc_state->stream == NULL) 1076 goto unlock; 1077 1078 switch (dm_crtc_state->stream->timing.display_color_depth) { 1079 case COLOR_DEPTH_666: 1080 bpc = 6; 1081 break; 1082 case COLOR_DEPTH_888: 1083 bpc = 8; 1084 break; 1085 case COLOR_DEPTH_101010: 1086 bpc = 10; 1087 break; 1088 case COLOR_DEPTH_121212: 1089 bpc = 12; 1090 break; 1091 case COLOR_DEPTH_161616: 1092 bpc = 16; 1093 break; 1094 default: 1095 goto unlock; 1096 } 1097 1098 seq_printf(m, "Current: %u\n", bpc); 1099 res = 0; 1100 1101 unlock: 1102 drm_modeset_unlock(&crtc->mutex); 1103 mutex_unlock(&dev->mode_config.mutex); 1104 1105 return res; 1106 } 1107 DEFINE_SHOW_ATTRIBUTE(amdgpu_current_bpc); 1108 1109 /* 1110 * Returns the current colorspace for the crtc. 
 * Example usage: cat /sys/kernel/debug/dri/0/crtc-0/amdgpu_current_colorspace
 */
static int amdgpu_current_colorspace_show(struct seq_file *m, void *data)
{
	struct drm_crtc *crtc = m->private;
	struct drm_device *dev = crtc->dev;
	struct dm_crtc_state *dm_crtc_state = NULL;
	int res = -ENODEV;

	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_lock(&crtc->mutex, NULL);
	if (crtc->state == NULL)
		goto unlock;

	dm_crtc_state = to_dm_crtc_state(crtc->state);
	if (dm_crtc_state->stream == NULL)
		goto unlock;

	switch (dm_crtc_state->stream->output_color_space) {
	case COLOR_SPACE_SRGB:
		seq_puts(m, "sRGB");
		break;
	case COLOR_SPACE_YCBCR601:
	case COLOR_SPACE_YCBCR601_LIMITED:
		seq_puts(m, "BT601_YCC");
		break;
	case COLOR_SPACE_YCBCR709:
	case COLOR_SPACE_YCBCR709_LIMITED:
		seq_puts(m, "BT709_YCC");
		break;
	case COLOR_SPACE_ADOBERGB:
		seq_puts(m, "opRGB");
		break;
	case COLOR_SPACE_2020_RGB_FULLRANGE:
		seq_puts(m, "BT2020_RGB");
		break;
	case COLOR_SPACE_2020_YCBCR:
		seq_puts(m, "BT2020_YCC");
		break;
	default:
		goto unlock;
	}
	res = 0;

unlock:
	drm_modeset_unlock(&crtc->mutex);
	mutex_unlock(&dev->mode_config.mutex);

	return res;
}
DEFINE_SHOW_ATTRIBUTE(amdgpu_current_colorspace);


/*
 * Example usage:
 * Disable DSC passthrough, i.e. have DSC decoding at the converter, not at the
 * external RX:
 *   echo 1 > /sys/kernel/debug/dri/0/DP-1/dsc_disable_passthrough
 * Enable DSC passthrough, i.e. pass DSC through to the external RX:
 *   echo 0 > /sys/kernel/debug/dri/0/DP-1/dsc_disable_passthrough
 */
static ssize_t dp_dsc_passthrough_set(struct file *f, const char __user *buf,
				      size_t size, loff_t *pos)
{
	struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
	char *wr_buf = NULL;
	uint32_t wr_buf_size = 42;
	int max_param_num = 1;
	long param;
	uint8_t param_nums = 0;

	if (size == 0)
		return -EINVAL;

	wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);

	if (!wr_buf) {
		DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
		return -ENOSPC;
	}

	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
					   &param, buf,
					   max_param_num,
					   &param_nums)) {
		kfree(wr_buf);
		return -EINVAL;
	}

	aconnector->dsc_settings.dsc_force_disable_passthrough = param;

	kfree(wr_buf);
	return 0;
}

/*
 * Returns the HDCP capability of the Display (1.4 for now).
 *
 * NOTE: Not all HDMI displays report their HDCP caps even when they are capable.
 * Since it is rare for a display to not be HDCP 1.4 capable, we set HDMI as
 * always capable.
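 * For DP connectors the reported capability is instead based on what the
 * sink itself advertises, so no such assumption is made there.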
1210 * 1211 * Example usage: cat /sys/kernel/debug/dri/0/DP-1/hdcp_sink_capability 1212 * or cat /sys/kernel/debug/dri/0/HDMI-A-1/hdcp_sink_capability 1213 */ 1214 static int hdcp_sink_capability_show(struct seq_file *m, void *data) 1215 { 1216 struct drm_connector *connector = m->private; 1217 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 1218 bool hdcp_cap, hdcp2_cap; 1219 1220 if (connector->status != connector_status_connected) 1221 return -ENODEV; 1222 1223 seq_printf(m, "%s:%d HDCP version: ", connector->name, connector->base.id); 1224 1225 hdcp_cap = dc_link_is_hdcp14(aconnector->dc_link, aconnector->dc_sink->sink_signal); 1226 hdcp2_cap = dc_link_is_hdcp22(aconnector->dc_link, aconnector->dc_sink->sink_signal); 1227 1228 1229 if (hdcp_cap) 1230 seq_printf(m, "%s ", "HDCP1.4"); 1231 if (hdcp2_cap) 1232 seq_printf(m, "%s ", "HDCP2.2"); 1233 1234 if (!hdcp_cap && !hdcp2_cap) 1235 seq_printf(m, "%s ", "None"); 1236 1237 seq_puts(m, "\n"); 1238 1239 return 0; 1240 } 1241 1242 /* 1243 * Returns whether the connected display is internal and not hotpluggable. 1244 * Example usage: cat /sys/kernel/debug/dri/0/DP-1/internal_display 1245 */ 1246 static int internal_display_show(struct seq_file *m, void *data) 1247 { 1248 struct drm_connector *connector = m->private; 1249 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 1250 struct dc_link *link = aconnector->dc_link; 1251 1252 seq_printf(m, "Internal: %u\n", link->is_internal_display); 1253 1254 return 0; 1255 } 1256 1257 /* 1258 * Returns the number of segments used if ODM Combine mode is enabled. 1259 * Example usage: cat /sys/kernel/debug/dri/0/DP-1/odm_combine_segments 1260 */ 1261 static int odm_combine_segments_show(struct seq_file *m, void *unused) 1262 { 1263 struct drm_connector *connector = m->private; 1264 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 1265 struct dc_link *link = aconnector->dc_link; 1266 struct pipe_ctx *pipe_ctx = NULL; 1267 int i, segments = -EOPNOTSUPP; 1268 1269 for (i = 0; i < MAX_PIPES; i++) { 1270 pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; 1271 if (pipe_ctx->stream && 1272 pipe_ctx->stream->link == link) 1273 break; 1274 } 1275 1276 if (connector->status != connector_status_connected) 1277 return -ENODEV; 1278 1279 if (pipe_ctx != NULL && pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments) 1280 pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments(pipe_ctx->stream_res.tg, &segments); 1281 1282 seq_printf(m, "%d\n", segments); 1283 return 0; 1284 } 1285 1286 /* function description 1287 * 1288 * generic SDP message access for testing 1289 * 1290 * debugfs sdp_message is located at /syskernel/debug/dri/0/DP-x 1291 * 1292 * SDP header 1293 * Hb0 : Secondary-Data Packet ID 1294 * Hb1 : Secondary-Data Packet type 1295 * Hb2 : Secondary-Data-packet-specific header, Byte 0 1296 * Hb3 : Secondary-Data-packet-specific header, Byte 1 1297 * 1298 * for using custom sdp message: input 4 bytes SDP header and 32 bytes raw data 1299 */ 1300 static ssize_t dp_sdp_message_debugfs_write(struct file *f, const char __user *buf, 1301 size_t size, loff_t *pos) 1302 { 1303 int r; 1304 uint8_t data[36] = {0}; 1305 struct amdgpu_dm_connector *connector = file_inode(f)->i_private; 1306 struct dm_crtc_state *acrtc_state; 1307 uint32_t write_size = 36; 1308 1309 if (connector->base.status != connector_status_connected) 1310 return -ENODEV; 1311 1312 if (size == 0) 1313 return 0; 1314 1315 acrtc_state = 
to_dm_crtc_state(connector->base.state->crtc->state); 1316 1317 r = copy_from_user(data, buf, write_size); 1318 1319 write_size -= r; 1320 1321 dc_stream_send_dp_sdp(acrtc_state->stream, data, write_size); 1322 1323 return write_size; 1324 } 1325 1326 /* function: Read link's DSC & FEC capabilities 1327 * 1328 * 1329 * Access it with the following command (you need to specify 1330 * connector like DP-1): 1331 * 1332 * cat /sys/kernel/debug/dri/0/DP-X/dp_dsc_fec_support 1333 * 1334 */ 1335 static int dp_dsc_fec_support_show(struct seq_file *m, void *data) 1336 { 1337 struct drm_connector *connector = m->private; 1338 struct drm_modeset_acquire_ctx ctx; 1339 struct drm_device *dev = connector->dev; 1340 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 1341 int ret = 0; 1342 bool try_again = false; 1343 bool is_fec_supported = false; 1344 bool is_dsc_supported = false; 1345 struct dpcd_caps dpcd_caps; 1346 1347 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); 1348 do { 1349 try_again = false; 1350 ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx); 1351 if (ret) { 1352 if (ret == -EDEADLK) { 1353 ret = drm_modeset_backoff(&ctx); 1354 if (!ret) { 1355 try_again = true; 1356 continue; 1357 } 1358 } 1359 break; 1360 } 1361 if (connector->status != connector_status_connected) { 1362 ret = -ENODEV; 1363 break; 1364 } 1365 dpcd_caps = aconnector->dc_link->dpcd_caps; 1366 if (aconnector->mst_output_port) { 1367 /* aconnector sets dsc_aux during get_modes call 1368 * if MST connector has it means it can either 1369 * enable DSC on the sink device or on MST branch 1370 * its connected to. 1371 */ 1372 if (aconnector->dsc_aux) { 1373 is_fec_supported = true; 1374 is_dsc_supported = true; 1375 } 1376 } else { 1377 is_fec_supported = dpcd_caps.fec_cap.raw & 0x1; 1378 is_dsc_supported = dpcd_caps.dsc_caps.dsc_basic_caps.raw[0] & 0x1; 1379 } 1380 } while (try_again); 1381 1382 drm_modeset_drop_locks(&ctx); 1383 drm_modeset_acquire_fini(&ctx); 1384 1385 seq_printf(m, "FEC_Sink_Support: %s\n", str_yes_no(is_fec_supported)); 1386 seq_printf(m, "DSC_Sink_Support: %s\n", str_yes_no(is_dsc_supported)); 1387 1388 return ret; 1389 } 1390 1391 /* function: Trigger virtual HPD redetection on connector 1392 * 1393 * This function will perform link rediscovery, link disable 1394 * and enable, and dm connector state update. 
 *
 * Retrigger HPD on an existing connector by echoing 1 into
 * its respective "trigger_hotplug" debugfs entry:
 *
 * echo 1 > /sys/kernel/debug/dri/0/DP-X/trigger_hotplug
 *
 * This function can also perform an HPD unplug:
 *
 * echo 0 > /sys/kernel/debug/dri/0/DP-X/trigger_hotplug
 *
 */
static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
			       size_t size, loff_t *pos)
{
	struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
	struct drm_connector *connector = &aconnector->base;
	struct dc_link *link = NULL;
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	enum dc_connection_type new_connection_type = dc_connection_none;
	char *wr_buf = NULL;
	uint32_t wr_buf_size = 42;
	int max_param_num = 1;
	long param[1] = {0};
	uint8_t param_nums = 0;
	bool ret = false;

	if (!aconnector || !aconnector->dc_link)
		return -EINVAL;

	if (size == 0)
		return -EINVAL;

	wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);

	if (!wr_buf) {
		DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
		return -ENOSPC;
	}

	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
					   (long *)param, buf,
					   max_param_num,
					   &param_nums)) {
		kfree(wr_buf);
		return -EINVAL;
	}

	kfree(wr_buf);

	if (param_nums <= 0) {
		DRM_DEBUG_DRIVER("user data could not be read\n");
		return -EINVAL;
	}

	mutex_lock(&aconnector->hpd_lock);

	/* Not supported for MST end devices */
	if (aconnector->mst_root) {
		mutex_unlock(&aconnector->hpd_lock);
		return -EINVAL;
	}

	if (param[0] == 1) {

		if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type) &&
		    new_connection_type != dc_connection_none)
			goto unlock;

		mutex_lock(&adev->dm.dc_lock);
		ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
		mutex_unlock(&adev->dm.dc_lock);

		if (!ret)
			goto unlock;

		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		drm_kms_helper_connector_hotplug_event(connector);
	} else if (param[0] == 0) {
		if (!aconnector->dc_link)
			goto unlock;

		link = aconnector->dc_link;

		if (link->local_sink) {
			dc_sink_release(link->local_sink);
			link->local_sink = NULL;
		}

		link->dpcd_sink_count = 0;
		link->type = dc_connection_none;
		link->dongle_max_pix_clk = 0;

		amdgpu_dm_update_connector_after_detect(aconnector);

		/* If the aconnector is the root node in mst topology */
		if (aconnector->mst_mgr.mst_state == true)
			dc_link_reset_cur_dp_mst_topology(link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		drm_kms_helper_connector_hotplug_event(connector);
	}

unlock:
	mutex_unlock(&aconnector->hpd_lock);

	return size;
}

/* function: read DSC status on the connector
 *
 * The read function: dp_dsc_clock_en_read
 * returns the current status of the DSC clock on the connector.
 * The return is a boolean flag: 1 or 0.
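 * The status is read back from the DSC block of the pipe that currently
 * drives this connector (via dsc_read_state()).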
1517 * 1518 * Access it with the following command (you need to specify 1519 * connector like DP-1): 1520 * 1521 * cat /sys/kernel/debug/dri/0/DP-X/dsc_clock_en 1522 * 1523 * Expected output: 1524 * 1 - means that DSC is currently enabled 1525 * 0 - means that DSC is disabled 1526 */ 1527 static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, 1528 size_t size, loff_t *pos) 1529 { 1530 char *rd_buf = NULL; 1531 char *rd_buf_ptr = NULL; 1532 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; 1533 struct display_stream_compressor *dsc; 1534 struct dcn_dsc_state dsc_state = {0}; 1535 const uint32_t rd_buf_size = 10; 1536 struct pipe_ctx *pipe_ctx; 1537 ssize_t result = 0; 1538 int i, r, str_len = 10; 1539 1540 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); 1541 1542 if (!rd_buf) 1543 return -ENOMEM; 1544 1545 rd_buf_ptr = rd_buf; 1546 1547 for (i = 0; i < MAX_PIPES; i++) { 1548 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; 1549 if (pipe_ctx->stream && 1550 pipe_ctx->stream->link == aconnector->dc_link && 1551 pipe_ctx->stream->sink && 1552 pipe_ctx->stream->sink == aconnector->dc_sink) 1553 break; 1554 } 1555 1556 dsc = pipe_ctx->stream_res.dsc; 1557 if (dsc) 1558 dsc->funcs->dsc_read_state(dsc, &dsc_state); 1559 1560 snprintf(rd_buf_ptr, str_len, 1561 "%d\n", 1562 dsc_state.dsc_clock_en); 1563 rd_buf_ptr += str_len; 1564 1565 while (size) { 1566 if (*pos >= rd_buf_size) 1567 break; 1568 1569 r = put_user(*(rd_buf + result), buf); 1570 if (r) { 1571 kfree(rd_buf); 1572 return r; /* r = -EFAULT */ 1573 } 1574 1575 buf += 1; 1576 size -= 1; 1577 *pos += 1; 1578 result += 1; 1579 } 1580 1581 kfree(rd_buf); 1582 return result; 1583 } 1584 1585 /* function: write force DSC on the connector 1586 * 1587 * The write function: dp_dsc_clock_en_write 1588 * enables to force DSC on the connector. 
 * User can write to either force enable or force disable DSC
 * on the next modeset, or set it back to the driver default.
 *
 * Accepted inputs:
 * 0 - default DSC enablement policy
 * 1 - force enable DSC on the connector
 * 2 - force disable DSC on the connector (might cause a failure in atomic_check)
 *
 * Writing DSC settings is done with the following command:
 * - To force enable DSC (you need to specify
 *   the connector, like DP-1):
 *
 *   echo 0x1 > /sys/kernel/debug/dri/0/DP-X/dsc_clock_en
 *
 * - To return to the default state, set the flag to zero and
 *   let the driver deal with DSC automatically
 *   (you need to specify the connector, like DP-1):
 *
 *   echo 0x0 > /sys/kernel/debug/dri/0/DP-X/dsc_clock_en
 *
 */
static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
				     size_t size, loff_t *pos)
{
	struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc = NULL;
	struct dm_crtc_state *dm_crtc_state = NULL;
	struct pipe_ctx *pipe_ctx;
	int i;
	char *wr_buf = NULL;
	uint32_t wr_buf_size = 42;
	int max_param_num = 1;
	long param[1] = {0};
	uint8_t param_nums = 0;

	if (size == 0)
		return -EINVAL;

	wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);

	if (!wr_buf) {
		DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
		return -ENOSPC;
	}

	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
					   (long *)param, buf,
					   max_param_num,
					   &param_nums)) {
		kfree(wr_buf);
		return -EINVAL;
	}

	if (param_nums <= 0) {
		DRM_DEBUG_DRIVER("user data could not be read\n");
		kfree(wr_buf);
		return -EINVAL;
	}

	for (i = 0; i < MAX_PIPES; i++) {
		pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx->stream &&
		    pipe_ctx->stream->link == aconnector->dc_link &&
		    pipe_ctx->stream->sink &&
		    pipe_ctx->stream->sink == aconnector->dc_sink)
			break;
	}

	if (!pipe_ctx->stream)
		goto done;

	// Get CRTC state
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	if (connector->state == NULL)
		goto unlock;

	crtc = connector->state->crtc;
	if (crtc == NULL)
		goto unlock;

	drm_modeset_lock(&crtc->mutex, NULL);
	if (crtc->state == NULL)
		goto unlock;

	dm_crtc_state = to_dm_crtc_state(crtc->state);
	if (dm_crtc_state->stream == NULL)
		goto unlock;

	if (param[0] == 1)
		aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_ENABLE;
	else if (param[0] == 2)
		aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_DISABLE;
	else
		aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_DEFAULT;

	dm_crtc_state->dsc_force_changed = true;

unlock:
	if (crtc)
		drm_modeset_unlock(&crtc->mutex);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
	mutex_unlock(&dev->mode_config.mutex);

done:
	kfree(wr_buf);
	return size;
}

/* function: read DSC slice width parameter on the connector
 *
 * The read function: dp_dsc_slice_width_read
 * returns the DSC slice width used in the current configuration.
 * The return is an integer: 0 or another positive number.
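 * The value is read back from the DSC hardware state of the pipe driving
 * this connector; the write path (dp_dsc_slice_width_write) only stores the
 * derived number of horizontal slices.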
* 1707 * Access the status with the following command: 1708 * 1709 * cat /sys/kernel/debug/dri/0/DP-X/dsc_slice_width 1710 * 1711 * 0 - means that DSC is disabled 1712 * 1713 * Any other number more than zero represents the 1714 * slice width currently used by DSC in pixels 1715 * 1716 */ 1717 static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, 1718 size_t size, loff_t *pos) 1719 { 1720 char *rd_buf = NULL; 1721 char *rd_buf_ptr = NULL; 1722 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; 1723 struct display_stream_compressor *dsc; 1724 struct dcn_dsc_state dsc_state = {0}; 1725 const uint32_t rd_buf_size = 100; 1726 struct pipe_ctx *pipe_ctx; 1727 ssize_t result = 0; 1728 int i, r, str_len = 30; 1729 1730 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); 1731 1732 if (!rd_buf) 1733 return -ENOMEM; 1734 1735 rd_buf_ptr = rd_buf; 1736 1737 for (i = 0; i < MAX_PIPES; i++) { 1738 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; 1739 if (pipe_ctx->stream && 1740 pipe_ctx->stream->link == aconnector->dc_link && 1741 pipe_ctx->stream->sink && 1742 pipe_ctx->stream->sink == aconnector->dc_sink) 1743 break; 1744 } 1745 1746 dsc = pipe_ctx->stream_res.dsc; 1747 if (dsc) 1748 dsc->funcs->dsc_read_state(dsc, &dsc_state); 1749 1750 snprintf(rd_buf_ptr, str_len, 1751 "%d\n", 1752 dsc_state.dsc_slice_width); 1753 rd_buf_ptr += str_len; 1754 1755 while (size) { 1756 if (*pos >= rd_buf_size) 1757 break; 1758 1759 r = put_user(*(rd_buf + result), buf); 1760 if (r) { 1761 kfree(rd_buf); 1762 return r; /* r = -EFAULT */ 1763 } 1764 1765 buf += 1; 1766 size -= 1; 1767 *pos += 1; 1768 result += 1; 1769 } 1770 1771 kfree(rd_buf); 1772 return result; 1773 } 1774 1775 /* function: write DSC slice width parameter 1776 * 1777 * The write function: dp_dsc_slice_width_write 1778 * overwrites automatically generated DSC configuration 1779 * of slice width. 1780 * 1781 * The user has to write the slice width divisible by the 1782 * picture width. 1783 * 1784 * Also the user has to write width in hexidecimal 1785 * rather than in decimal. 
1786 * 1787 * Writing DSC settings is done with the following command: 1788 * - To force overwrite slice width: (example sets to 1920 pixels) 1789 * 1790 * echo 0x780 > /sys/kernel/debug/dri/0/DP-X/dsc_slice_width 1791 * 1792 * - To stop overwriting and let driver find the optimal size, 1793 * set the width to zero: 1794 * 1795 * echo 0x0 > /sys/kernel/debug/dri/0/DP-X/dsc_slice_width 1796 * 1797 */ 1798 static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf, 1799 size_t size, loff_t *pos) 1800 { 1801 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; 1802 struct pipe_ctx *pipe_ctx; 1803 struct drm_connector *connector = &aconnector->base; 1804 struct drm_device *dev = connector->dev; 1805 struct drm_crtc *crtc = NULL; 1806 struct dm_crtc_state *dm_crtc_state = NULL; 1807 int i; 1808 char *wr_buf = NULL; 1809 uint32_t wr_buf_size = 42; 1810 int max_param_num = 1; 1811 long param[1] = {0}; 1812 uint8_t param_nums = 0; 1813 1814 if (size == 0) 1815 return -EINVAL; 1816 1817 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); 1818 1819 if (!wr_buf) { 1820 DRM_DEBUG_DRIVER("no memory to allocate write buffer\n"); 1821 return -ENOSPC; 1822 } 1823 1824 if (parse_write_buffer_into_params(wr_buf, wr_buf_size, 1825 (long *)param, buf, 1826 max_param_num, 1827 ¶m_nums)) { 1828 kfree(wr_buf); 1829 return -EINVAL; 1830 } 1831 1832 if (param_nums <= 0) { 1833 DRM_DEBUG_DRIVER("user data not be read\n"); 1834 kfree(wr_buf); 1835 return -EINVAL; 1836 } 1837 1838 for (i = 0; i < MAX_PIPES; i++) { 1839 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; 1840 if (pipe_ctx->stream && 1841 pipe_ctx->stream->link == aconnector->dc_link && 1842 pipe_ctx->stream->sink && 1843 pipe_ctx->stream->sink == aconnector->dc_sink) 1844 break; 1845 } 1846 1847 if (!pipe_ctx->stream) 1848 goto done; 1849 1850 // Safely get CRTC state 1851 mutex_lock(&dev->mode_config.mutex); 1852 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 1853 1854 if (connector->state == NULL) 1855 goto unlock; 1856 1857 crtc = connector->state->crtc; 1858 if (crtc == NULL) 1859 goto unlock; 1860 1861 drm_modeset_lock(&crtc->mutex, NULL); 1862 if (crtc->state == NULL) 1863 goto unlock; 1864 1865 dm_crtc_state = to_dm_crtc_state(crtc->state); 1866 if (dm_crtc_state->stream == NULL) 1867 goto unlock; 1868 1869 if (param[0] > 0) 1870 aconnector->dsc_settings.dsc_num_slices_h = DIV_ROUND_UP( 1871 pipe_ctx->stream->timing.h_addressable, 1872 param[0]); 1873 else 1874 aconnector->dsc_settings.dsc_num_slices_h = 0; 1875 1876 dm_crtc_state->dsc_force_changed = true; 1877 1878 unlock: 1879 if (crtc) 1880 drm_modeset_unlock(&crtc->mutex); 1881 drm_modeset_unlock(&dev->mode_config.connection_mutex); 1882 mutex_unlock(&dev->mode_config.mutex); 1883 1884 done: 1885 kfree(wr_buf); 1886 return size; 1887 } 1888 1889 /* function: read DSC slice height parameter on the connector 1890 * 1891 * The read function: dp_dsc_slice_height_read 1892 * returns dsc slice height used in the current configuration 1893 * The return is an integer: 0 or other positive number 1894 * 1895 * Access the status with the following command: 1896 * 1897 * cat /sys/kernel/debug/dri/0/DP-X/dsc_slice_height 1898 * 1899 * 0 - means that DSC is disabled 1900 * 1901 * Any other number more than zero represents the 1902 * slice height currently used by DSC in pixels 1903 * 1904 */ 1905 static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, 1906 size_t size, loff_t *pos) 1907 { 1908 char *rd_buf = NULL; 
1909 char *rd_buf_ptr = NULL; 1910 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; 1911 struct display_stream_compressor *dsc; 1912 struct dcn_dsc_state dsc_state = {0}; 1913 const uint32_t rd_buf_size = 100; 1914 struct pipe_ctx *pipe_ctx; 1915 ssize_t result = 0; 1916 int i, r, str_len = 30; 1917 1918 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); 1919 1920 if (!rd_buf) 1921 return -ENOMEM; 1922 1923 rd_buf_ptr = rd_buf; 1924 1925 for (i = 0; i < MAX_PIPES; i++) { 1926 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; 1927 if (pipe_ctx->stream && 1928 pipe_ctx->stream->link == aconnector->dc_link && 1929 pipe_ctx->stream->sink && 1930 pipe_ctx->stream->sink == aconnector->dc_sink) 1931 break; 1932 } 1933 1934 dsc = pipe_ctx->stream_res.dsc; 1935 if (dsc) 1936 dsc->funcs->dsc_read_state(dsc, &dsc_state); 1937 1938 snprintf(rd_buf_ptr, str_len, 1939 "%d\n", 1940 dsc_state.dsc_slice_height); 1941 rd_buf_ptr += str_len; 1942 1943 while (size) { 1944 if (*pos >= rd_buf_size) 1945 break; 1946 1947 r = put_user(*(rd_buf + result), buf); 1948 if (r) { 1949 kfree(rd_buf); 1950 return r; /* r = -EFAULT */ 1951 } 1952 1953 buf += 1; 1954 size -= 1; 1955 *pos += 1; 1956 result += 1; 1957 } 1958 1959 kfree(rd_buf); 1960 return result; 1961 } 1962 1963 /* function: write DSC slice height parameter 1964 * 1965 * The write function: dp_dsc_slice_height_write 1966 * overwrites automatically generated DSC configuration 1967 * of slice height. 1968 * 1969 * The slice height written must evenly divide the 1970 * picture height. 1971 * 1972 * Also the user has to write height in hexadecimal 1973 * rather than in decimal. 1974 * 1975 * Writing DSC settings is done with the following command: 1976 * - To force overwrite slice height (example sets to 128 pixels): 1977 * 1978 * echo 0x80 > /sys/kernel/debug/dri/0/DP-X/dsc_slice_height 1979 * 1980 * - To stop overwriting and let driver find the optimal size, 1981 * set the height to zero: 1982 * 1983 * echo 0x0 > /sys/kernel/debug/dri/0/DP-X/dsc_slice_height 1984 * 1985 */ 1986 static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf, 1987 size_t size, loff_t *pos) 1988 { 1989 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; 1990 struct drm_connector *connector = &aconnector->base; 1991 struct drm_device *dev = connector->dev; 1992 struct drm_crtc *crtc = NULL; 1993 struct dm_crtc_state *dm_crtc_state = NULL; 1994 struct pipe_ctx *pipe_ctx; 1995 int i; 1996 char *wr_buf = NULL; 1997 uint32_t wr_buf_size = 42; 1998 int max_param_num = 1; 1999 uint8_t param_nums = 0; 2000 long param[1] = {0}; 2001 2002 if (size == 0) 2003 return -EINVAL; 2004 2005 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); 2006 2007 if (!wr_buf) { 2008 DRM_DEBUG_DRIVER("no memory to allocate write buffer\n"); 2009 return -ENOSPC; 2010 } 2011 2012 if (parse_write_buffer_into_params(wr_buf, wr_buf_size, 2013 (long *)param, buf, 2014 max_param_num, 2015 &param_nums)) { 2016 kfree(wr_buf); 2017 return -EINVAL; 2018 } 2019 2020 if (param_nums <= 0) { 2021 DRM_DEBUG_DRIVER("user data could not be read\n"); 2022 kfree(wr_buf); 2023 return -EINVAL; 2024 } 2025 2026 for (i = 0; i < MAX_PIPES; i++) { 2027 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; 2028 if (pipe_ctx->stream && 2029 pipe_ctx->stream->link == aconnector->dc_link && 2030 pipe_ctx->stream->sink && 2031 pipe_ctx->stream->sink == aconnector->dc_sink) 2032 break; 2033 } 2034 2035 if (!pipe_ctx->stream) 2036 goto
done; 2037 2038 // Get CRTC state 2039 mutex_lock(&dev->mode_config.mutex); 2040 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 2041 2042 if (connector->state == NULL) 2043 goto unlock; 2044 2045 crtc = connector->state->crtc; 2046 if (crtc == NULL) 2047 goto unlock; 2048 2049 drm_modeset_lock(&crtc->mutex, NULL); 2050 if (crtc->state == NULL) 2051 goto unlock; 2052 2053 dm_crtc_state = to_dm_crtc_state(crtc->state); 2054 if (dm_crtc_state->stream == NULL) 2055 goto unlock; 2056 2057 if (param[0] > 0) 2058 aconnector->dsc_settings.dsc_num_slices_v = DIV_ROUND_UP( 2059 pipe_ctx->stream->timing.v_addressable, 2060 param[0]); 2061 else 2062 aconnector->dsc_settings.dsc_num_slices_v = 0; 2063 2064 dm_crtc_state->dsc_force_changed = true; 2065 2066 unlock: 2067 if (crtc) 2068 drm_modeset_unlock(&crtc->mutex); 2069 drm_modeset_unlock(&dev->mode_config.connection_mutex); 2070 mutex_unlock(&dev->mode_config.mutex); 2071 2072 done: 2073 kfree(wr_buf); 2074 return size; 2075 } 2076 2077 /* function: read DSC target rate on the connector in bits per pixel 2078 * 2079 * The read function: dp_dsc_bits_per_pixel_read 2080 * returns target rate of compression in bits per pixel 2081 * The return is an integer: 0 or other positive integer 2082 * 2083 * Access it with the following command: 2084 * 2085 * cat /sys/kernel/debug/dri/0/DP-X/dsc_bits_per_pixel 2086 * 2087 * 0 - means that DSC is disabled 2088 */ 2089 static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, 2090 size_t size, loff_t *pos) 2091 { 2092 char *rd_buf = NULL; 2093 char *rd_buf_ptr = NULL; 2094 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; 2095 struct display_stream_compressor *dsc; 2096 struct dcn_dsc_state dsc_state = {0}; 2097 const uint32_t rd_buf_size = 100; 2098 struct pipe_ctx *pipe_ctx; 2099 ssize_t result = 0; 2100 int i, r, str_len = 30; 2101 2102 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); 2103 2104 if (!rd_buf) 2105 return -ENOMEM; 2106 2107 rd_buf_ptr = rd_buf; 2108 2109 for (i = 0; i < MAX_PIPES; i++) { 2110 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; 2111 if (pipe_ctx->stream && 2112 pipe_ctx->stream->link == aconnector->dc_link && 2113 pipe_ctx->stream->sink && 2114 pipe_ctx->stream->sink == aconnector->dc_sink) 2115 break; 2116 } 2117 2118 dsc = pipe_ctx->stream_res.dsc; 2119 if (dsc) 2120 dsc->funcs->dsc_read_state(dsc, &dsc_state); 2121 2122 snprintf(rd_buf_ptr, str_len, 2123 "%d\n", 2124 dsc_state.dsc_bits_per_pixel); 2125 rd_buf_ptr += str_len; 2126 2127 while (size) { 2128 if (*pos >= rd_buf_size) 2129 break; 2130 2131 r = put_user(*(rd_buf + result), buf); 2132 if (r) { 2133 kfree(rd_buf); 2134 return r; /* r = -EFAULT */ 2135 } 2136 2137 buf += 1; 2138 size -= 1; 2139 *pos += 1; 2140 result += 1; 2141 } 2142 2143 kfree(rd_buf); 2144 return result; 2145 } 2146 2147 /* function: write DSC target rate in bits per pixel 2148 * 2149 * The write function: dp_dsc_bits_per_pixel_write 2150 * overwrites automatically generated DSC configuration 2151 * of DSC target bit rate. 2152 * 2153 * Also the user has to write bpp in hexidecimal 2154 * rather than in decimal. 
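 * The value is interpreted in units of 1/16 of a bit per pixel; as in the example below, 0x100 (256) corresponds to 16 bpp.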
2155 * 2156 * Writing DSC settings is done with the following command: 2157 * - To force overwrite rate (example sets to 256 bpp x 1/16): 2158 * 2159 * echo 0x100 > /sys/kernel/debug/dri/0/DP-X/dsc_bits_per_pixel 2160 * 2161 * - To stop overwriting and let driver find the optimal rate, 2162 * set the rate to zero: 2163 * 2164 * echo 0x0 > /sys/kernel/debug/dri/0/DP-X/dsc_bits_per_pixel 2165 * 2166 */ 2167 static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *buf, 2168 size_t size, loff_t *pos) 2169 { 2170 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; 2171 struct drm_connector *connector = &aconnector->base; 2172 struct drm_device *dev = connector->dev; 2173 struct drm_crtc *crtc = NULL; 2174 struct dm_crtc_state *dm_crtc_state = NULL; 2175 struct pipe_ctx *pipe_ctx; 2176 int i; 2177 char *wr_buf = NULL; 2178 uint32_t wr_buf_size = 42; 2179 int max_param_num = 1; 2180 uint8_t param_nums = 0; 2181 long param[1] = {0}; 2182 2183 if (size == 0) 2184 return -EINVAL; 2185 2186 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); 2187 2188 if (!wr_buf) { 2189 DRM_DEBUG_DRIVER("no memory to allocate write buffer\n"); 2190 return -ENOSPC; 2191 } 2192 2193 if (parse_write_buffer_into_params(wr_buf, wr_buf_size, 2194 (long *)param, buf, 2195 max_param_num, 2196 &param_nums)) { 2197 kfree(wr_buf); 2198 return -EINVAL; 2199 } 2200 2201 if (param_nums <= 0) { 2202 DRM_DEBUG_DRIVER("user data could not be read\n"); 2203 kfree(wr_buf); 2204 return -EINVAL; 2205 } 2206 2207 for (i = 0; i < MAX_PIPES; i++) { 2208 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; 2209 if (pipe_ctx->stream && 2210 pipe_ctx->stream->link == aconnector->dc_link && 2211 pipe_ctx->stream->sink && 2212 pipe_ctx->stream->sink == aconnector->dc_sink) 2213 break; 2214 } 2215 2216 if (!pipe_ctx->stream) 2217 goto done; 2218 2219 // Get CRTC state 2220 mutex_lock(&dev->mode_config.mutex); 2221 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 2222 2223 if (connector->state == NULL) 2224 goto unlock; 2225 2226 crtc = connector->state->crtc; 2227 if (crtc == NULL) 2228 goto unlock; 2229 2230 drm_modeset_lock(&crtc->mutex, NULL); 2231 if (crtc->state == NULL) 2232 goto unlock; 2233 2234 dm_crtc_state = to_dm_crtc_state(crtc->state); 2235 if (dm_crtc_state->stream == NULL) 2236 goto unlock; 2237 2238 aconnector->dsc_settings.dsc_bits_per_pixel = param[0]; 2239 2240 dm_crtc_state->dsc_force_changed = true; 2241 2242 unlock: 2243 if (crtc) 2244 drm_modeset_unlock(&crtc->mutex); 2245 drm_modeset_unlock(&dev->mode_config.connection_mutex); 2246 mutex_unlock(&dev->mode_config.mutex); 2247 2248 done: 2249 kfree(wr_buf); 2250 return size; 2251 } 2252 2253 /* function: read DSC picture width parameter on the connector 2254 * 2255 * The read function: dp_dsc_pic_width_read 2256 * returns dsc picture width used in the current configuration 2257 * It is the same as h_addressable of the current 2258 * display's timing 2259 * The return is an integer: 0 or other positive integer 2260 * If 0 then DSC is disabled.
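 * The value is read back from the DSC hardware state of the pipe that is currently driving this connector.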
2261 * 2262 * Access it with the following command: 2263 * 2264 * cat /sys/kernel/debug/dri/0/DP-X/dsc_pic_width 2265 * 2266 * 0 - means that DSC is disabled 2267 */ 2268 static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, 2269 size_t size, loff_t *pos) 2270 { 2271 char *rd_buf = NULL; 2272 char *rd_buf_ptr = NULL; 2273 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; 2274 struct display_stream_compressor *dsc; 2275 struct dcn_dsc_state dsc_state = {0}; 2276 const uint32_t rd_buf_size = 100; 2277 struct pipe_ctx *pipe_ctx; 2278 ssize_t result = 0; 2279 int i, r, str_len = 30; 2280 2281 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); 2282 2283 if (!rd_buf) 2284 return -ENOMEM; 2285 2286 rd_buf_ptr = rd_buf; 2287 2288 for (i = 0; i < MAX_PIPES; i++) { 2289 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; 2290 if (pipe_ctx->stream && 2291 pipe_ctx->stream->link == aconnector->dc_link && 2292 pipe_ctx->stream->sink && 2293 pipe_ctx->stream->sink == aconnector->dc_sink) 2294 break; 2295 } 2296 2297 dsc = pipe_ctx->stream_res.dsc; 2298 if (dsc) 2299 dsc->funcs->dsc_read_state(dsc, &dsc_state); 2300 2301 snprintf(rd_buf_ptr, str_len, 2302 "%d\n", 2303 dsc_state.dsc_pic_width); 2304 rd_buf_ptr += str_len; 2305 2306 while (size) { 2307 if (*pos >= rd_buf_size) 2308 break; 2309 2310 r = put_user(*(rd_buf + result), buf); 2311 if (r) { 2312 kfree(rd_buf); 2313 return r; /* r = -EFAULT */ 2314 } 2315 2316 buf += 1; 2317 size -= 1; 2318 *pos += 1; 2319 result += 1; 2320 } 2321 2322 kfree(rd_buf); 2323 return result; 2324 } 2325 2326 static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, 2327 size_t size, loff_t *pos) 2328 { 2329 char *rd_buf = NULL; 2330 char *rd_buf_ptr = NULL; 2331 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; 2332 struct display_stream_compressor *dsc; 2333 struct dcn_dsc_state dsc_state = {0}; 2334 const uint32_t rd_buf_size = 100; 2335 struct pipe_ctx *pipe_ctx; 2336 ssize_t result = 0; 2337 int i, r, str_len = 30; 2338 2339 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); 2340 2341 if (!rd_buf) 2342 return -ENOMEM; 2343 2344 rd_buf_ptr = rd_buf; 2345 2346 for (i = 0; i < MAX_PIPES; i++) { 2347 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; 2348 if (pipe_ctx->stream && 2349 pipe_ctx->stream->link == aconnector->dc_link && 2350 pipe_ctx->stream->sink && 2351 pipe_ctx->stream->sink == aconnector->dc_sink) 2352 break; 2353 } 2354 2355 dsc = pipe_ctx->stream_res.dsc; 2356 if (dsc) 2357 dsc->funcs->dsc_read_state(dsc, &dsc_state); 2358 2359 snprintf(rd_buf_ptr, str_len, 2360 "%d\n", 2361 dsc_state.dsc_pic_height); 2362 rd_buf_ptr += str_len; 2363 2364 while (size) { 2365 if (*pos >= rd_buf_size) 2366 break; 2367 2368 r = put_user(*(rd_buf + result), buf); 2369 if (r) { 2370 kfree(rd_buf); 2371 return r; /* r = -EFAULT */ 2372 } 2373 2374 buf += 1; 2375 size -= 1; 2376 *pos += 1; 2377 result += 1; 2378 } 2379 2380 kfree(rd_buf); 2381 return result; 2382 } 2383 2384 /* function: read DSC chunk size parameter on the connector 2385 * 2386 * The read function: dp_dsc_chunk_size_read 2387 * returns dsc chunk size set in the current configuration 2388 * The value is calculated automatically by DSC code 2389 * and depends on slice parameters and bpp target rate 2390 * The return is an integer: 0 or other positive integer 2391 * If 0 then DSC is disabled. 
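 * Roughly speaking, the chunk size is the amount of compressed data produced for one line of one slice, derived from the slice width and the target bits per pixel.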
2392 * 2393 * Access it with the following command: 2394 * 2395 * cat /sys/kernel/debug/dri/0/DP-X/dsc_chunk_size 2396 * 2397 * 0 - means that DSC is disabled 2398 */ 2399 static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, 2400 size_t size, loff_t *pos) 2401 { 2402 char *rd_buf = NULL; 2403 char *rd_buf_ptr = NULL; 2404 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; 2405 struct display_stream_compressor *dsc; 2406 struct dcn_dsc_state dsc_state = {0}; 2407 const uint32_t rd_buf_size = 100; 2408 struct pipe_ctx *pipe_ctx; 2409 ssize_t result = 0; 2410 int i, r, str_len = 30; 2411 2412 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); 2413 2414 if (!rd_buf) 2415 return -ENOMEM; 2416 2417 rd_buf_ptr = rd_buf; 2418 2419 for (i = 0; i < MAX_PIPES; i++) { 2420 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; 2421 if (pipe_ctx->stream && 2422 pipe_ctx->stream->link == aconnector->dc_link && 2423 pipe_ctx->stream->sink && 2424 pipe_ctx->stream->sink == aconnector->dc_sink) 2425 break; 2426 } 2427 2428 dsc = pipe_ctx->stream_res.dsc; 2429 if (dsc) 2430 dsc->funcs->dsc_read_state(dsc, &dsc_state); 2431 2432 snprintf(rd_buf_ptr, str_len, 2433 "%d\n", 2434 dsc_state.dsc_chunk_size); 2435 rd_buf_ptr += str_len; 2436 2437 while (size) { 2438 if (*pos >= rd_buf_size) 2439 break; 2440 2441 r = put_user(*(rd_buf + result), buf); 2442 if (r) { 2443 kfree(rd_buf); 2444 return r; /* r = -EFAULT */ 2445 } 2446 2447 buf += 1; 2448 size -= 1; 2449 *pos += 1; 2450 result += 1; 2451 } 2452 2453 kfree(rd_buf); 2454 return result; 2455 } 2456 2457 /* function: read DSC slice bpg offset on the connector 2458 * 2459 * The read function: dp_dsc_slice_bpg_offset_read 2460 * returns dsc bpg slice offset set in the current configuration 2461 * The value is calculated automatically by DSC code 2462 * and depends on slice parameters and bpp target rate 2463 * The return is an integer: 0 or other positive integer 2464 * If 0 then DSC is disabled. 
2465 * 2466 * Access it with the following command: 2467 * 2468 * cat /sys/kernel/debug/dri/0/DP-X/dsc_slice_bpg_offset 2469 * 2470 * 0 - means that DSC is disabled 2471 */ 2472 static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, 2473 size_t size, loff_t *pos) 2474 { 2475 char *rd_buf = NULL; 2476 char *rd_buf_ptr = NULL; 2477 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; 2478 struct display_stream_compressor *dsc; 2479 struct dcn_dsc_state dsc_state = {0}; 2480 const uint32_t rd_buf_size = 100; 2481 struct pipe_ctx *pipe_ctx; 2482 ssize_t result = 0; 2483 int i, r, str_len = 30; 2484 2485 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); 2486 2487 if (!rd_buf) 2488 return -ENOMEM; 2489 2490 rd_buf_ptr = rd_buf; 2491 2492 for (i = 0; i < MAX_PIPES; i++) { 2493 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; 2494 if (pipe_ctx->stream && 2495 pipe_ctx->stream->link == aconnector->dc_link && 2496 pipe_ctx->stream->sink && 2497 pipe_ctx->stream->sink == aconnector->dc_sink) 2498 break; 2499 } 2500 2501 dsc = pipe_ctx->stream_res.dsc; 2502 if (dsc) 2503 dsc->funcs->dsc_read_state(dsc, &dsc_state); 2504 2505 snprintf(rd_buf_ptr, str_len, 2506 "%d\n", 2507 dsc_state.dsc_slice_bpg_offset); 2508 rd_buf_ptr += str_len; 2509 2510 while (size) { 2511 if (*pos >= rd_buf_size) 2512 break; 2513 2514 r = put_user(*(rd_buf + result), buf); 2515 if (r) { 2516 kfree(rd_buf); 2517 return r; /* r = -EFAULT */ 2518 } 2519 2520 buf += 1; 2521 size -= 1; 2522 *pos += 1; 2523 result += 1; 2524 } 2525 2526 kfree(rd_buf); 2527 return result; 2528 } 2529 2530 2531 /* 2532 * function description: Read max_requested_bpc property from the connector 2533 * 2534 * Access it with the following command: 2535 * 2536 * cat /sys/kernel/debug/dri/0/DP-X/max_bpc 2537 * 2538 */ 2539 static ssize_t dp_max_bpc_read(struct file *f, char __user *buf, 2540 size_t size, loff_t *pos) 2541 { 2542 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; 2543 struct drm_connector *connector = &aconnector->base; 2544 struct drm_device *dev = connector->dev; 2545 struct dm_connector_state *state; 2546 ssize_t result = 0; 2547 char *rd_buf = NULL; 2548 char *rd_buf_ptr = NULL; 2549 const uint32_t rd_buf_size = 10; 2550 int r; 2551 2552 rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); 2553 2554 if (!rd_buf) 2555 return -ENOMEM; 2556 2557 mutex_lock(&dev->mode_config.mutex); 2558 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 2559 2560 if (connector->state == NULL) 2561 goto unlock; 2562 2563 state = to_dm_connector_state(connector->state); 2564 2565 rd_buf_ptr = rd_buf; 2566 snprintf(rd_buf_ptr, rd_buf_size, 2567 "%u\n", 2568 state->base.max_requested_bpc); 2569 2570 while (size) { 2571 if (*pos >= rd_buf_size) 2572 break; 2573 2574 r = put_user(*(rd_buf + result), buf); 2575 if (r) { 2576 result = r; /* r = -EFAULT */ 2577 goto unlock; 2578 } 2579 buf += 1; 2580 size -= 1; 2581 *pos += 1; 2582 result += 1; 2583 } 2584 unlock: 2585 drm_modeset_unlock(&dev->mode_config.connection_mutex); 2586 mutex_unlock(&dev->mode_config.mutex); 2587 kfree(rd_buf); 2588 return result; 2589 } 2590 2591 2592 /* 2593 * function description: Set max_requested_bpc property on the connector 2594 * 2595 * This function will not force the input BPC on connector, it will only 2596 * change the max value. This is equivalent to setting max_bpc through 2597 * xrandr. 2598 * 2599 * The BPC value written must be >= 6 and <= 16. 
Values outside of this 2600 * range will result in errors. 2601 * 2602 * BPC values: 2603 * 0x6 - 6 BPC 2604 * 0x8 - 8 BPC 2605 * 0xa - 10 BPC 2606 * 0xc - 12 BPC 2607 * 0x10 - 16 BPC 2608 * 2609 * Write the max_bpc in the following way: 2610 * 2611 * echo 0x6 > /sys/kernel/debug/dri/0/DP-X/max_bpc 2612 * 2613 */ 2614 static ssize_t dp_max_bpc_write(struct file *f, const char __user *buf, 2615 size_t size, loff_t *pos) 2616 { 2617 struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; 2618 struct drm_connector *connector = &aconnector->base; 2619 struct dm_connector_state *state; 2620 struct drm_device *dev = connector->dev; 2621 char *wr_buf = NULL; 2622 uint32_t wr_buf_size = 42; 2623 int max_param_num = 1; 2624 long param[1] = {0}; 2625 uint8_t param_nums = 0; 2626 2627 if (size == 0) 2628 return -EINVAL; 2629 2630 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); 2631 2632 if (!wr_buf) { 2633 DRM_DEBUG_DRIVER("no memory to allocate write buffer\n"); 2634 return -ENOSPC; 2635 } 2636 2637 if (parse_write_buffer_into_params(wr_buf, wr_buf_size, 2638 (long *)param, buf, 2639 max_param_num, 2640 &param_nums)) { 2641 kfree(wr_buf); 2642 return -EINVAL; 2643 } 2644 2645 if (param_nums <= 0) { 2646 DRM_DEBUG_DRIVER("user data could not be read\n"); 2647 kfree(wr_buf); 2648 return -EINVAL; 2649 } 2650 2651 if (param[0] < 6 || param[0] > 16) { 2652 DRM_DEBUG_DRIVER("bad max_bpc value\n"); 2653 kfree(wr_buf); 2654 return -EINVAL; 2655 } 2656 2657 mutex_lock(&dev->mode_config.mutex); 2658 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 2659 2660 if (connector->state == NULL) 2661 goto unlock; 2662 2663 state = to_dm_connector_state(connector->state); 2664 state->base.max_requested_bpc = param[0]; 2665 unlock: 2666 drm_modeset_unlock(&dev->mode_config.connection_mutex); 2667 mutex_unlock(&dev->mode_config.mutex); 2668 2669 kfree(wr_buf); 2670 return size; 2671 } 2672 2673 /* 2674 * IPS status. Read only. 2675 * 2676 * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_ips_status 2677 */ 2678 static int ips_status_show(struct seq_file *m, void *unused) 2679 { 2680 struct amdgpu_device *adev = m->private; 2681 struct dc *dc = adev->dm.dc; 2682 struct dc_dmub_srv *dc_dmub_srv; 2683 2684 seq_printf(m, "IPS config: %d\n", dc->config.disable_ips); 2685 seq_printf(m, "Idle optimization: %d\n", dc->idle_optimizations_allowed); 2686 2687 if (adev->dm.idle_workqueue) { 2688 seq_printf(m, "Idle workqueue - enabled: %d\n", adev->dm.idle_workqueue->enable); 2689 seq_printf(m, "Idle workqueue - running: %d\n", adev->dm.idle_workqueue->running); 2690 } 2691 2692 dc_dmub_srv = dc->ctx->dmub_srv; 2693 if (dc_dmub_srv && dc_dmub_srv->dmub) { 2694 uint32_t rcg_count, ips1_count, ips2_count; 2695 volatile const struct dmub_shared_state_ips_fw *ips_fw = 2696 &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw; 2697 rcg_count = ips_fw->rcg_entry_count; 2698 ips1_count = ips_fw->ips1_entry_count; 2699 ips2_count = ips_fw->ips2_entry_count; 2700 seq_printf(m, "entry counts: rcg=%u ips1=%u ips2=%u\n", 2701 rcg_count, 2702 ips1_count, 2703 ips2_count); 2704 rcg_count = ips_fw->rcg_exit_count; 2705 ips1_count = ips_fw->ips1_exit_count; 2706 ips2_count = ips_fw->ips2_exit_count; 2707 seq_printf(m, "exit counts: rcg=%u ips1=%u ips2=%u", 2708 rcg_count, 2709 ips1_count, 2710 ips2_count); 2711 seq_puts(m, "\n"); 2712 } 2713 return 0; 2714 } 2715 2716 /* 2717 * Backlight at this moment. Read only. 2718 * As written to display, taking ABM and backlight lut into account.
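 * (i.e. the effective level actually driven to the panel, which may differ from the brightness requested through the backlight sysfs interface)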
2719 * Ranges from 0x0 to 0x10000 (= 100% PWM) 2720 * 2721 * Example usage: cat /sys/kernel/debug/dri/0/eDP-1/current_backlight 2722 */ 2723 static int current_backlight_show(struct seq_file *m, void *unused) 2724 { 2725 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(m->private); 2726 struct dc_link *link = aconnector->dc_link; 2727 unsigned int backlight; 2728 2729 backlight = dc_link_get_backlight_level(link); 2730 seq_printf(m, "0x%x\n", backlight); 2731 2732 return 0; 2733 } 2734 2735 /* 2736 * Backlight value that is being approached. Read only. 2737 * As written to display, taking ABM and backlight lut into account. 2738 * Ranges from 0x0 to 0x10000 (= 100% PWM) 2739 * 2740 * Example usage: cat /sys/kernel/debug/dri/0/eDP-1/target_backlight 2741 */ 2742 static int target_backlight_show(struct seq_file *m, void *unused) 2743 { 2744 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(m->private); 2745 struct dc_link *link = aconnector->dc_link; 2746 unsigned int backlight; 2747 2748 backlight = dc_link_get_target_backlight_pwm(link); 2749 seq_printf(m, "0x%x\n", backlight); 2750 2751 return 0; 2752 } 2753 2754 /* 2755 * function description: Determine if the connector is mst connector 2756 * 2757 * This function helps to determine whether a connector is a mst connector. 2758 * - "root" stands for the root connector of the topology 2759 * - "branch" stands for branch device of the topology 2760 * - "end" stands for leaf node connector of the topology 2761 * - "no" stands for the connector is not a device of a mst topology 2762 * Access it with the following command: 2763 * 2764 * cat /sys/kernel/debug/dri/0/DP-X/is_mst_connector 2765 * 2766 */ 2767 static int dp_is_mst_connector_show(struct seq_file *m, void *unused) 2768 { 2769 struct drm_connector *connector = m->private; 2770 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 2771 struct drm_dp_mst_topology_mgr *mgr = NULL; 2772 struct drm_dp_mst_port *port = NULL; 2773 char *role = NULL; 2774 2775 mutex_lock(&aconnector->hpd_lock); 2776 2777 if (aconnector->mst_mgr.mst_state) { 2778 role = "root"; 2779 } else if (aconnector->mst_root && 2780 aconnector->mst_root->mst_mgr.mst_state) { 2781 2782 role = "end"; 2783 2784 mgr = &aconnector->mst_root->mst_mgr; 2785 port = aconnector->mst_output_port; 2786 2787 drm_modeset_lock(&mgr->base.lock, NULL); 2788 if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING && 2789 port->mcs) 2790 role = "branch"; 2791 drm_modeset_unlock(&mgr->base.lock); 2792 2793 } else { 2794 role = "no"; 2795 } 2796 2797 seq_printf(m, "%s\n", role); 2798 2799 mutex_unlock(&aconnector->hpd_lock); 2800 2801 return 0; 2802 } 2803 2804 /* 2805 * function description: Read out the mst progress status 2806 * 2807 * This function helps to determine the mst progress status of 2808 * a mst connector. 
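 * For each tracked MST stage the output is "<stage>:done" or "<stage>:not_done"; "disabled" is printed when no MST status has been recorded for the connector.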
2809 * 2810 * Access it with the following command: 2811 * 2812 * cat /sys/kernel/debug/dri/0/DP-X/mst_progress_status 2813 * 2814 */ 2815 static int dp_mst_progress_status_show(struct seq_file *m, void *unused) 2816 { 2817 struct drm_connector *connector = m->private; 2818 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 2819 struct amdgpu_device *adev = drm_to_adev(connector->dev); 2820 int i; 2821 2822 mutex_lock(&aconnector->hpd_lock); 2823 mutex_lock(&adev->dm.dc_lock); 2824 2825 if (aconnector->mst_status == MST_STATUS_DEFAULT) { 2826 seq_puts(m, "disabled\n"); 2827 } else { 2828 for (i = 0; i < sizeof(mst_progress_status)/sizeof(char *); i++) 2829 seq_printf(m, "%s:%s\n", 2830 mst_progress_status[i], 2831 aconnector->mst_status & BIT(i) ? "done" : "not_done"); 2832 } 2833 2834 mutex_unlock(&adev->dm.dc_lock); 2835 mutex_unlock(&aconnector->hpd_lock); 2836 2837 return 0; 2838 } 2839 2840 /* 2841 * Reports whether the connected display is a USB4 DPIA tunneled display 2842 * Example usage: cat /sys/kernel/debug/dri/0/DP-8/is_dpia_link 2843 */ 2844 static int is_dpia_link_show(struct seq_file *m, void *data) 2845 { 2846 struct drm_connector *connector = m->private; 2847 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 2848 struct dc_link *link = aconnector->dc_link; 2849 2850 if (connector->status != connector_status_connected) 2851 return -ENODEV; 2852 2853 seq_printf(m, "%s\n", (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? "yes" : 2854 (link->ep_type == DISPLAY_ENDPOINT_PHY) ? "no" : "unknown"); 2855 2856 return 0; 2857 } 2858 2859 DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support); 2860 DEFINE_SHOW_ATTRIBUTE(dmub_fw_state); 2861 DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer); 2862 DEFINE_SHOW_ATTRIBUTE(dp_lttpr_status); 2863 DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability); 2864 DEFINE_SHOW_ATTRIBUTE(internal_display); 2865 DEFINE_SHOW_ATTRIBUTE(odm_combine_segments); 2866 DEFINE_SHOW_ATTRIBUTE(replay_capability); 2867 DEFINE_SHOW_ATTRIBUTE(psr_capability); 2868 DEFINE_SHOW_ATTRIBUTE(dp_is_mst_connector); 2869 DEFINE_SHOW_ATTRIBUTE(dp_mst_progress_status); 2870 DEFINE_SHOW_ATTRIBUTE(is_dpia_link); 2871 2872 static const struct file_operations dp_dsc_clock_en_debugfs_fops = { 2873 .owner = THIS_MODULE, 2874 .read = dp_dsc_clock_en_read, 2875 .write = dp_dsc_clock_en_write, 2876 .llseek = default_llseek 2877 }; 2878 2879 static const struct file_operations dp_dsc_slice_width_debugfs_fops = { 2880 .owner = THIS_MODULE, 2881 .read = dp_dsc_slice_width_read, 2882 .write = dp_dsc_slice_width_write, 2883 .llseek = default_llseek 2884 }; 2885 2886 static const struct file_operations dp_dsc_slice_height_debugfs_fops = { 2887 .owner = THIS_MODULE, 2888 .read = dp_dsc_slice_height_read, 2889 .write = dp_dsc_slice_height_write, 2890 .llseek = default_llseek 2891 }; 2892 2893 static const struct file_operations dp_dsc_bits_per_pixel_debugfs_fops = { 2894 .owner = THIS_MODULE, 2895 .read = dp_dsc_bits_per_pixel_read, 2896 .write = dp_dsc_bits_per_pixel_write, 2897 .llseek = default_llseek 2898 }; 2899 2900 static const struct file_operations dp_dsc_pic_width_debugfs_fops = { 2901 .owner = THIS_MODULE, 2902 .read = dp_dsc_pic_width_read, 2903 .llseek = default_llseek 2904 }; 2905 2906 static const struct file_operations dp_dsc_pic_height_debugfs_fops = { 2907 .owner = THIS_MODULE, 2908 .read = dp_dsc_pic_height_read, 2909 .llseek = default_llseek 2910 }; 2911 2912 static const struct file_operations dp_dsc_chunk_size_debugfs_fops = { 2913 .owner = THIS_MODULE, 
2914 .read = dp_dsc_chunk_size_read, 2915 .llseek = default_llseek 2916 }; 2917 2918 static const struct file_operations dp_dsc_slice_bpg_offset_debugfs_fops = { 2919 .owner = THIS_MODULE, 2920 .read = dp_dsc_slice_bpg_offset_read, 2921 .llseek = default_llseek 2922 }; 2923 2924 static const struct file_operations trigger_hotplug_debugfs_fops = { 2925 .owner = THIS_MODULE, 2926 .write = trigger_hotplug, 2927 .llseek = default_llseek 2928 }; 2929 2930 static const struct file_operations dp_link_settings_debugfs_fops = { 2931 .owner = THIS_MODULE, 2932 .read = dp_link_settings_read, 2933 .write = dp_link_settings_write, 2934 .llseek = default_llseek 2935 }; 2936 2937 static const struct file_operations dp_phy_settings_debugfs_fop = { 2938 .owner = THIS_MODULE, 2939 .read = dp_phy_settings_read, 2940 .write = dp_phy_settings_write, 2941 .llseek = default_llseek 2942 }; 2943 2944 static const struct file_operations dp_phy_test_pattern_fops = { 2945 .owner = THIS_MODULE, 2946 .write = dp_phy_test_pattern_debugfs_write, 2947 .llseek = default_llseek 2948 }; 2949 2950 static const struct file_operations sdp_message_fops = { 2951 .owner = THIS_MODULE, 2952 .write = dp_sdp_message_debugfs_write, 2953 .llseek = default_llseek 2954 }; 2955 2956 static const struct file_operations dp_max_bpc_debugfs_fops = { 2957 .owner = THIS_MODULE, 2958 .read = dp_max_bpc_read, 2959 .write = dp_max_bpc_write, 2960 .llseek = default_llseek 2961 }; 2962 2963 static const struct file_operations dp_dsc_disable_passthrough_debugfs_fops = { 2964 .owner = THIS_MODULE, 2965 .write = dp_dsc_passthrough_set, 2966 .llseek = default_llseek 2967 }; 2968 2969 static const struct file_operations dp_mst_link_settings_debugfs_fops = { 2970 .owner = THIS_MODULE, 2971 .write = dp_mst_link_setting, 2972 .llseek = default_llseek 2973 }; 2974 2975 static const struct { 2976 char *name; 2977 const struct file_operations *fops; 2978 } dp_debugfs_entries[] = { 2979 {"link_settings", &dp_link_settings_debugfs_fops}, 2980 {"phy_settings", &dp_phy_settings_debugfs_fop}, 2981 {"lttpr_status", &dp_lttpr_status_fops}, 2982 {"test_pattern", &dp_phy_test_pattern_fops}, 2983 {"hdcp_sink_capability", &hdcp_sink_capability_fops}, 2984 {"sdp_message", &sdp_message_fops}, 2985 {"dsc_clock_en", &dp_dsc_clock_en_debugfs_fops}, 2986 {"dsc_slice_width", &dp_dsc_slice_width_debugfs_fops}, 2987 {"dsc_slice_height", &dp_dsc_slice_height_debugfs_fops}, 2988 {"dsc_bits_per_pixel", &dp_dsc_bits_per_pixel_debugfs_fops}, 2989 {"dsc_pic_width", &dp_dsc_pic_width_debugfs_fops}, 2990 {"dsc_pic_height", &dp_dsc_pic_height_debugfs_fops}, 2991 {"dsc_chunk_size", &dp_dsc_chunk_size_debugfs_fops}, 2992 {"dsc_slice_bpg", &dp_dsc_slice_bpg_offset_debugfs_fops}, 2993 {"dp_dsc_fec_support", &dp_dsc_fec_support_fops}, 2994 {"max_bpc", &dp_max_bpc_debugfs_fops}, 2995 {"dsc_disable_passthrough", &dp_dsc_disable_passthrough_debugfs_fops}, 2996 {"is_mst_connector", &dp_is_mst_connector_fops}, 2997 {"mst_progress_status", &dp_mst_progress_status_fops}, 2998 {"is_dpia_link", &is_dpia_link_fops}, 2999 {"mst_link_settings", &dp_mst_link_settings_debugfs_fops} 3000 }; 3001 3002 static const struct { 3003 char *name; 3004 const struct file_operations *fops; 3005 } hdmi_debugfs_entries[] = { 3006 {"hdcp_sink_capability", &hdcp_sink_capability_fops} 3007 }; 3008 3009 /* 3010 * Force YUV420 output if available from the given mode 3011 */ 3012 static int force_yuv420_output_set(void *data, u64 val) 3013 { 3014 struct amdgpu_dm_connector *connector = data; 3015 3016 
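/* Any non-zero value enables the override; note that the flag is only consumed when the stream for this connector is re-created, typically on the next modeset. */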
connector->force_yuv420_output = (bool)val; 3017 3018 return 0; 3019 } 3020 3021 /* 3022 * Check if YUV420 is forced when available from the given mode 3023 */ 3024 static int force_yuv420_output_get(void *data, u64 *val) 3025 { 3026 struct amdgpu_dm_connector *connector = data; 3027 3028 *val = connector->force_yuv420_output; 3029 3030 return 0; 3031 } 3032 3033 DEFINE_DEBUGFS_ATTRIBUTE(force_yuv420_output_fops, force_yuv420_output_get, 3034 force_yuv420_output_set, "%llu\n"); 3035 3036 /* 3037 * Read Replay state 3038 */ 3039 static int replay_get_state(void *data, u64 *val) 3040 { 3041 struct amdgpu_dm_connector *connector = data; 3042 struct dc_link *link = connector->dc_link; 3043 uint64_t state = REPLAY_STATE_INVALID; 3044 3045 dc_link_get_replay_state(link, &state); 3046 3047 *val = state; 3048 3049 return 0; 3050 } 3051 3052 /* 3053 * Read PSR state 3054 */ 3055 static int psr_get(void *data, u64 *val) 3056 { 3057 struct amdgpu_dm_connector *connector = data; 3058 struct dc_link *link = connector->dc_link; 3059 enum dc_psr_state state = PSR_STATE0; 3060 3061 dc_link_get_psr_state(link, &state); 3062 3063 *val = state; 3064 3065 return 0; 3066 } 3067 3068 /* 3069 * Read PSR state residency 3070 */ 3071 static int psr_read_residency(void *data, u64 *val) 3072 { 3073 struct amdgpu_dm_connector *connector = data; 3074 struct dc_link *link = connector->dc_link; 3075 u32 residency = 0; 3076 3077 link->dc->link_srv->edp_get_psr_residency(link, &residency); 3078 3079 *val = (u64)residency; 3080 3081 return 0; 3082 } 3083 3084 /* read allow_edp_hotplug_detection */ 3085 static int allow_edp_hotplug_detection_get(void *data, u64 *val) 3086 { 3087 struct amdgpu_dm_connector *aconnector = data; 3088 struct drm_connector *connector = &aconnector->base; 3089 struct drm_device *dev = connector->dev; 3090 struct amdgpu_device *adev = drm_to_adev(dev); 3091 3092 *val = adev->dm.dc->config.allow_edp_hotplug_detection; 3093 3094 return 0; 3095 } 3096 3097 /* set allow_edp_hotplug_detection */ 3098 static int allow_edp_hotplug_detection_set(void *data, u64 val) 3099 { 3100 struct amdgpu_dm_connector *aconnector = data; 3101 struct drm_connector *connector = &aconnector->base; 3102 struct drm_device *dev = connector->dev; 3103 struct amdgpu_device *adev = drm_to_adev(dev); 3104 3105 adev->dm.dc->config.allow_edp_hotplug_detection = (uint32_t) val; 3106 3107 return 0; 3108 } 3109 3110 /* check if kernel disallow eDP enter psr state 3111 * cat /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr 3112 * 0: allow edp enter psr; 1: disallow 3113 */ 3114 static int disallow_edp_enter_psr_get(void *data, u64 *val) 3115 { 3116 struct amdgpu_dm_connector *aconnector = data; 3117 3118 *val = (u64) aconnector->disallow_edp_enter_psr; 3119 return 0; 3120 } 3121 3122 /* set kernel disallow eDP enter psr state 3123 * echo 0x0 /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr 3124 * 0: allow edp enter psr; 1: disallow 3125 * 3126 * usage: test app read crc from PSR eDP rx. 3127 * 3128 * during kernel boot up, kernel write dpcd 0x170 = 5. 3129 * this notify eDP rx psr enable and let rx check crc. 3130 * rx fw will start checking crc for rx internal logic. 3131 * crc read count within dpcd 0x246 is not updated and 3132 * value is 0. when eDP tx driver wants to read rx crc 3133 * from dpcd 0x246, 0x270, read count 0 lead tx driver 3134 * timeout. 3135 * 3136 * to avoid this, we add this debugfs to let test app to disbable 3137 * rx crc checking for rx internal logic. 
then test app can read 3138 * non-zero crc read count. 3139 * 3140 * expected app sequence is as below: 3141 * 1. disable eDP PHY and notify eDP rx with dpcd 0x600 = 2. 3142 * 2. echo 0x1 /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr 3143 * 3. enable eDP PHY and notify eDP rx with dpcd 0x600 = 1 but 3144 * without dpcd 0x170 = 5. 3145 * 4. read crc from rx dpcd 0x270, 0x246, etc. 3146 * 5. echo 0x0 /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr. 3147 * this will let eDP back to normal with psr setup dpcd 0x170 = 5. 3148 */ 3149 static int disallow_edp_enter_psr_set(void *data, u64 val) 3150 { 3151 struct amdgpu_dm_connector *aconnector = data; 3152 3153 aconnector->disallow_edp_enter_psr = val ? true : false; 3154 return 0; 3155 } 3156 3157 static int dmub_trace_mask_set(void *data, u64 val) 3158 { 3159 struct amdgpu_device *adev = data; 3160 struct dmub_srv *srv = adev->dm.dc->ctx->dmub_srv->dmub; 3161 enum dmub_gpint_command cmd; 3162 u64 mask = 0xffff; 3163 u8 shift = 0; 3164 u32 res; 3165 int i; 3166 3167 if (!srv->fw_version) 3168 return -EINVAL; 3169 3170 for (i = 0; i < 4; i++) { 3171 res = (val & mask) >> shift; 3172 3173 switch (i) { 3174 case 0: 3175 cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD0; 3176 break; 3177 case 1: 3178 cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1; 3179 break; 3180 case 2: 3181 cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD2; 3182 break; 3183 case 3: 3184 cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD3; 3185 break; 3186 } 3187 3188 if (!dc_wake_and_execute_gpint(adev->dm.dc->ctx, cmd, res, NULL, DM_DMUB_WAIT_TYPE_WAIT)) 3189 return -EIO; 3190 3191 usleep_range(100, 1000); 3192 3193 mask <<= 16; 3194 shift += 16; 3195 } 3196 3197 return 0; 3198 } 3199 3200 static int dmub_trace_mask_show(void *data, u64 *val) 3201 { 3202 enum dmub_gpint_command cmd = DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD0; 3203 struct amdgpu_device *adev = data; 3204 struct dmub_srv *srv = adev->dm.dc->ctx->dmub_srv->dmub; 3205 u8 shift = 0; 3206 u64 raw = 0; 3207 u64 res = 0; 3208 int i = 0; 3209 3210 if (!srv->fw_version) 3211 return -EINVAL; 3212 3213 while (i < 4) { 3214 uint32_t response; 3215 3216 if (!dc_wake_and_execute_gpint(adev->dm.dc->ctx, cmd, 0, &response, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) 3217 return -EIO; 3218 3219 raw = response; 3220 usleep_range(100, 1000); 3221 3222 cmd++; 3223 res |= (raw << shift); 3224 shift += 16; 3225 i++; 3226 } 3227 3228 *val = res; 3229 3230 return 0; 3231 } 3232 3233 DEFINE_DEBUGFS_ATTRIBUTE(dmub_trace_mask_fops, dmub_trace_mask_show, 3234 dmub_trace_mask_set, "0x%llx\n"); 3235 3236 /* 3237 * Set dmcub trace event IRQ enable or disable. 
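 * Values other than 0 and 1 are ignored.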
3238 * Usage to enable dmcub trace event IRQ: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en 3239 * Usage to disable dmcub trace event IRQ: echo 0 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en 3240 */ 3241 static int dmcub_trace_event_state_set(void *data, u64 val) 3242 { 3243 struct amdgpu_device *adev = data; 3244 3245 if (val == 1 || val == 0) { 3246 dc_dmub_trace_event_control(adev->dm.dc, val); 3247 adev->dm.dmcub_trace_event_en = (bool)val; 3248 } else 3249 return 0; 3250 3251 return 0; 3252 } 3253 3254 /* 3255 * The interface doesn't need get function, so it will return the 3256 * value of zero 3257 * Usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en 3258 */ 3259 static int dmcub_trace_event_state_get(void *data, u64 *val) 3260 { 3261 struct amdgpu_device *adev = data; 3262 3263 *val = adev->dm.dmcub_trace_event_en; 3264 return 0; 3265 } 3266 3267 DEFINE_DEBUGFS_ATTRIBUTE(dmcub_trace_event_state_fops, dmcub_trace_event_state_get, 3268 dmcub_trace_event_state_set, "%llu\n"); 3269 3270 DEFINE_DEBUGFS_ATTRIBUTE(replay_state_fops, replay_get_state, NULL, "%llu\n"); 3271 3272 DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n"); 3273 DEFINE_DEBUGFS_ATTRIBUTE(psr_residency_fops, psr_read_residency, NULL, 3274 "%llu\n"); 3275 3276 DEFINE_DEBUGFS_ATTRIBUTE(allow_edp_hotplug_detection_fops, 3277 allow_edp_hotplug_detection_get, 3278 allow_edp_hotplug_detection_set, "%llu\n"); 3279 3280 DEFINE_DEBUGFS_ATTRIBUTE(disallow_edp_enter_psr_fops, 3281 disallow_edp_enter_psr_get, 3282 disallow_edp_enter_psr_set, "%llu\n"); 3283 3284 DEFINE_SHOW_ATTRIBUTE(current_backlight); 3285 DEFINE_SHOW_ATTRIBUTE(target_backlight); 3286 DEFINE_SHOW_ATTRIBUTE(ips_status); 3287 3288 static const struct { 3289 char *name; 3290 const struct file_operations *fops; 3291 } connector_debugfs_entries[] = { 3292 {"force_yuv420_output", &force_yuv420_output_fops}, 3293 {"trigger_hotplug", &trigger_hotplug_debugfs_fops}, 3294 {"internal_display", &internal_display_fops}, 3295 {"odm_combine_segments", &odm_combine_segments_fops} 3296 }; 3297 3298 /* 3299 * Returns supported customized link rates by this eDP panel. 3300 * Example usage: cat /sys/kernel/debug/dri/0/eDP-x/ilr_setting 3301 */ 3302 static int edp_ilr_show(struct seq_file *m, void *unused) 3303 { 3304 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(m->private); 3305 struct dc_link *link = aconnector->dc_link; 3306 uint8_t supported_link_rates[16]; 3307 uint32_t link_rate_in_khz; 3308 uint32_t entry = 0; 3309 uint8_t dpcd_rev; 3310 3311 memset(supported_link_rates, 0, sizeof(supported_link_rates)); 3312 dm_helpers_dp_read_dpcd(link->ctx, link, DP_SUPPORTED_LINK_RATES, 3313 supported_link_rates, sizeof(supported_link_rates)); 3314 3315 dpcd_rev = link->dpcd_caps.dpcd_rev.raw; 3316 3317 if (dpcd_rev >= DP_DPCD_REV_13 && 3318 (supported_link_rates[entry+1] != 0 || supported_link_rates[entry] != 0)) { 3319 3320 for (entry = 0; entry < 16; entry += 2) { 3321 link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 + 3322 supported_link_rates[entry]) * 200; 3323 seq_printf(m, "[%d] %d kHz\n", entry/2, link_rate_in_khz); 3324 } 3325 } else { 3326 seq_puts(m, "ILR is not supported by this eDP panel.\n"); 3327 } 3328 3329 return 0; 3330 } 3331 3332 /* 3333 * Set supported customized link rate to eDP panel. 3334 * 3335 * echo <lane_count> <link_rate option> > ilr_setting 3336 * 3337 * for example, supported ILR : [0] 1620000 kHz [1] 2160000 kHz [2] 2430000 kHz ... 
3338 * echo 4 1 > /sys/kernel/debug/dri/0/eDP-x/ilr_setting 3339 * to set 4 lanes and 2.16 GHz 3340 */ 3341 static ssize_t edp_ilr_write(struct file *f, const char __user *buf, 3342 size_t size, loff_t *pos) 3343 { 3344 struct amdgpu_dm_connector *connector = file_inode(f)->i_private; 3345 struct dc_link *link = connector->dc_link; 3346 struct amdgpu_device *adev = drm_to_adev(connector->base.dev); 3347 struct dc *dc = (struct dc *)link->dc; 3348 struct dc_link_settings prefer_link_settings; 3349 char *wr_buf = NULL; 3350 const uint32_t wr_buf_size = 40; 3351 /* 0: lane_count; 1: link_rate */ 3352 int max_param_num = 2; 3353 uint8_t param_nums = 0; 3354 long param[2]; 3355 bool valid_input = true; 3356 3357 if (size == 0) 3358 return -EINVAL; 3359 3360 wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); 3361 if (!wr_buf) 3362 return -ENOMEM; 3363 3364 if (parse_write_buffer_into_params(wr_buf, wr_buf_size, 3365 (long *)param, buf, 3366 max_param_num, 3367 &param_nums)) { 3368 kfree(wr_buf); 3369 return -EINVAL; 3370 } 3371 3372 if (param_nums <= 0) { 3373 kfree(wr_buf); 3374 return -EINVAL; 3375 } 3376 3377 switch (param[0]) { 3378 case LANE_COUNT_ONE: 3379 case LANE_COUNT_TWO: 3380 case LANE_COUNT_FOUR: 3381 break; 3382 default: 3383 valid_input = false; 3384 break; 3385 } 3386 3387 if (param[1] >= link->dpcd_caps.edp_supported_link_rates_count) 3388 valid_input = false; 3389 3390 if (!valid_input) { 3391 kfree(wr_buf); 3392 DRM_DEBUG_DRIVER("Invalid Input value. No HW will be programmed\n"); 3393 prefer_link_settings.use_link_rate_set = false; 3394 mutex_lock(&adev->dm.dc_lock); 3395 dc_link_set_preferred_training_settings(dc, NULL, NULL, link, false); 3396 mutex_unlock(&adev->dm.dc_lock); 3397 return size; 3398 } 3399 3400 /* save user force lane_count, link_rate to preferred settings 3401 * spread spectrum will not be changed 3402 */ 3403 prefer_link_settings.link_spread = link->cur_link_settings.link_spread; 3404 prefer_link_settings.lane_count = param[0]; 3405 prefer_link_settings.use_link_rate_set = true; 3406 prefer_link_settings.link_rate_set = param[1]; 3407 prefer_link_settings.link_rate = link->dpcd_caps.edp_supported_link_rates[param[1]]; 3408 3409 mutex_lock(&adev->dm.dc_lock); 3410 dc_link_set_preferred_training_settings(dc, &prefer_link_settings, 3411 NULL, link, false); 3412 mutex_unlock(&adev->dm.dc_lock); 3413 3414 kfree(wr_buf); 3415 return size; 3416 } 3417 3418 static int edp_ilr_open(struct inode *inode, struct file *file) 3419 { 3420 return single_open(file, edp_ilr_show, inode->i_private); 3421 } 3422 3423 static const struct file_operations edp_ilr_debugfs_fops = { 3424 .owner = THIS_MODULE, 3425 .open = edp_ilr_open, 3426 .read = seq_read, 3427 .llseek = seq_lseek, 3428 .release = single_release, 3429 .write = edp_ilr_write 3430 }; 3431 3432 void connector_debugfs_init(struct amdgpu_dm_connector *connector) 3433 { 3434 int i; 3435 struct dentry *dir = connector->base.debugfs_entry; 3436 3437 if (connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort || 3438 connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) { 3439 for (i = 0; i < ARRAY_SIZE(dp_debugfs_entries); i++) { 3440 debugfs_create_file(dp_debugfs_entries[i].name, 3441 0644, dir, connector, 3442 dp_debugfs_entries[i].fops); 3443 } 3444 } 3445 if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) { 3446 debugfs_create_file("replay_capability", 0444, dir, connector, 3447 &replay_capability_fops); 3448 debugfs_create_file("replay_state", 0444, dir, connector, &replay_state_fops); 3449
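/* The psr_* entries below are registered with debugfs_create_file_unsafe(), which skips the file-ops proxy wrapper that debugfs_create_file() would otherwise add. */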
debugfs_create_file_unsafe("psr_capability", 0444, dir, connector, &psr_capability_fops); 3450 debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops); 3451 debugfs_create_file_unsafe("psr_residency", 0444, dir, 3452 connector, &psr_residency_fops); 3453 debugfs_create_file("amdgpu_current_backlight_pwm", 0444, dir, connector, 3454 &current_backlight_fops); 3455 debugfs_create_file("amdgpu_target_backlight_pwm", 0444, dir, connector, 3456 &target_backlight_fops); 3457 debugfs_create_file("ilr_setting", 0644, dir, connector, 3458 &edp_ilr_debugfs_fops); 3459 debugfs_create_file("allow_edp_hotplug_detection", 0644, dir, connector, 3460 &allow_edp_hotplug_detection_fops); 3461 debugfs_create_file("disallow_edp_enter_psr", 0644, dir, connector, 3462 &disallow_edp_enter_psr_fops); 3463 } 3464 3465 for (i = 0; i < ARRAY_SIZE(connector_debugfs_entries); i++) { 3466 debugfs_create_file(connector_debugfs_entries[i].name, 3467 0644, dir, connector, 3468 connector_debugfs_entries[i].fops); 3469 } 3470 3471 if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) { 3472 for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_entries); i++) { 3473 debugfs_create_file(hdmi_debugfs_entries[i].name, 3474 0644, dir, connector, 3475 hdmi_debugfs_entries[i].fops); 3476 } 3477 } 3478 } 3479 3480 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY 3481 /* 3482 * Set crc window coordinate x start 3483 */ 3484 static int crc_win_x_start_set(void *data, u64 val) 3485 { 3486 struct drm_crtc *crtc = data; 3487 struct drm_device *drm_dev = crtc->dev; 3488 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 3489 3490 spin_lock_irq(&drm_dev->event_lock); 3491 acrtc->dm_irq_params.window_param.x_start = (uint16_t) val; 3492 acrtc->dm_irq_params.window_param.update_win = false; 3493 spin_unlock_irq(&drm_dev->event_lock); 3494 3495 return 0; 3496 } 3497 3498 /* 3499 * Get crc window coordinate x start 3500 */ 3501 static int crc_win_x_start_get(void *data, u64 *val) 3502 { 3503 struct drm_crtc *crtc = data; 3504 struct drm_device *drm_dev = crtc->dev; 3505 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 3506 3507 spin_lock_irq(&drm_dev->event_lock); 3508 *val = acrtc->dm_irq_params.window_param.x_start; 3509 spin_unlock_irq(&drm_dev->event_lock); 3510 3511 return 0; 3512 } 3513 3514 DEFINE_DEBUGFS_ATTRIBUTE(crc_win_x_start_fops, crc_win_x_start_get, 3515 crc_win_x_start_set, "%llu\n"); 3516 3517 3518 /* 3519 * Set crc window coordinate y start 3520 */ 3521 static int crc_win_y_start_set(void *data, u64 val) 3522 { 3523 struct drm_crtc *crtc = data; 3524 struct drm_device *drm_dev = crtc->dev; 3525 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 3526 3527 spin_lock_irq(&drm_dev->event_lock); 3528 acrtc->dm_irq_params.window_param.y_start = (uint16_t) val; 3529 acrtc->dm_irq_params.window_param.update_win = false; 3530 spin_unlock_irq(&drm_dev->event_lock); 3531 3532 return 0; 3533 } 3534 3535 /* 3536 * Get crc window coordinate y start 3537 */ 3538 static int crc_win_y_start_get(void *data, u64 *val) 3539 { 3540 struct drm_crtc *crtc = data; 3541 struct drm_device *drm_dev = crtc->dev; 3542 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 3543 3544 spin_lock_irq(&drm_dev->event_lock); 3545 *val = acrtc->dm_irq_params.window_param.y_start; 3546 spin_unlock_irq(&drm_dev->event_lock); 3547 3548 return 0; 3549 } 3550 3551 DEFINE_DEBUGFS_ATTRIBUTE(crc_win_y_start_fops, crc_win_y_start_get, 3552 crc_win_y_start_set, "%llu\n"); 3553 3554 /* 3555 * Set crc window coordinate x end 3556 */ 3557 static int crc_win_x_end_set(void *data, u64
val) 3558 { 3559 struct drm_crtc *crtc = data; 3560 struct drm_device *drm_dev = crtc->dev; 3561 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 3562 3563 spin_lock_irq(&drm_dev->event_lock); 3564 acrtc->dm_irq_params.window_param.x_end = (uint16_t) val; 3565 acrtc->dm_irq_params.window_param.update_win = false; 3566 spin_unlock_irq(&drm_dev->event_lock); 3567 3568 return 0; 3569 } 3570 3571 /* 3572 * Get crc window coordinate x end 3573 */ 3574 static int crc_win_x_end_get(void *data, u64 *val) 3575 { 3576 struct drm_crtc *crtc = data; 3577 struct drm_device *drm_dev = crtc->dev; 3578 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 3579 3580 spin_lock_irq(&drm_dev->event_lock); 3581 *val = acrtc->dm_irq_params.window_param.x_end; 3582 spin_unlock_irq(&drm_dev->event_lock); 3583 3584 return 0; 3585 } 3586 3587 DEFINE_DEBUGFS_ATTRIBUTE(crc_win_x_end_fops, crc_win_x_end_get, 3588 crc_win_x_end_set, "%llu\n"); 3589 3590 /* 3591 * Set crc window coordinate y end 3592 */ 3593 static int crc_win_y_end_set(void *data, u64 val) 3594 { 3595 struct drm_crtc *crtc = data; 3596 struct drm_device *drm_dev = crtc->dev; 3597 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 3598 3599 spin_lock_irq(&drm_dev->event_lock); 3600 acrtc->dm_irq_params.window_param.y_end = (uint16_t) val; 3601 acrtc->dm_irq_params.window_param.update_win = false; 3602 spin_unlock_irq(&drm_dev->event_lock); 3603 3604 return 0; 3605 } 3606 3607 /* 3608 * Get crc window coordinate y end 3609 */ 3610 static int crc_win_y_end_get(void *data, u64 *val) 3611 { 3612 struct drm_crtc *crtc = data; 3613 struct drm_device *drm_dev = crtc->dev; 3614 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 3615 3616 spin_lock_irq(&drm_dev->event_lock); 3617 *val = acrtc->dm_irq_params.window_param.y_end; 3618 spin_unlock_irq(&drm_dev->event_lock); 3619 3620 return 0; 3621 } 3622 3623 DEFINE_DEBUGFS_ATTRIBUTE(crc_win_y_end_fops, crc_win_y_end_get, 3624 crc_win_y_end_set, "%llu\n"); 3625 /* 3626 * Trigger to commit crc window 3627 */ 3628 static int crc_win_update_set(void *data, u64 val) 3629 { 3630 struct drm_crtc *crtc = data; 3631 struct amdgpu_crtc *acrtc; 3632 struct amdgpu_device *adev = drm_to_adev(crtc->dev); 3633 3634 if (val) { 3635 acrtc = to_amdgpu_crtc(crtc); 3636 mutex_lock(&adev->dm.dc_lock); 3637 /* PSR may write to OTG CRC window control register, 3638 * so close it before starting secure_display. 
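 * (amdgpu_dm_psr_disable() below takes care of this.) Note that the window coordinates written via the crc_win_* files only take effect once a non-zero value is written to crc_win_update.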
3639 */ 3640 amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream); 3641 3642 spin_lock_irq(&adev_to_drm(adev)->event_lock); 3643 3644 acrtc->dm_irq_params.window_param.activated = true; 3645 acrtc->dm_irq_params.window_param.update_win = true; 3646 acrtc->dm_irq_params.window_param.skip_frame_cnt = 0; 3647 3648 spin_unlock_irq(&adev_to_drm(adev)->event_lock); 3649 mutex_unlock(&adev->dm.dc_lock); 3650 } 3651 3652 return 0; 3653 } 3654 3655 /* 3656 * Get crc window update flag 3657 */ 3658 static int crc_win_update_get(void *data, u64 *val) 3659 { 3660 *val = 0; 3661 return 0; 3662 } 3663 3664 DEFINE_DEBUGFS_ATTRIBUTE(crc_win_update_fops, crc_win_update_get, 3665 crc_win_update_set, "%llu\n"); 3666 #endif 3667 void crtc_debugfs_init(struct drm_crtc *crtc) 3668 { 3669 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY 3670 struct dentry *dir = debugfs_lookup("crc", crtc->debugfs_entry); 3671 3672 if (!dir) 3673 return; 3674 3675 debugfs_create_file_unsafe("crc_win_x_start", 0644, dir, crtc, 3676 &crc_win_x_start_fops); 3677 debugfs_create_file_unsafe("crc_win_y_start", 0644, dir, crtc, 3678 &crc_win_y_start_fops); 3679 debugfs_create_file_unsafe("crc_win_x_end", 0644, dir, crtc, 3680 &crc_win_x_end_fops); 3681 debugfs_create_file_unsafe("crc_win_y_end", 0644, dir, crtc, 3682 &crc_win_y_end_fops); 3683 debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc, 3684 &crc_win_update_fops); 3685 dput(dir); 3686 #endif 3687 debugfs_create_file("amdgpu_current_bpc", 0644, crtc->debugfs_entry, 3688 crtc, &amdgpu_current_bpc_fops); 3689 debugfs_create_file("amdgpu_current_colorspace", 0644, crtc->debugfs_entry, 3690 crtc, &amdgpu_current_colorspace_fops); 3691 } 3692 3693 /* 3694 * Writes DTN log state to the user supplied buffer. 3695 * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log 3696 */ 3697 static ssize_t dtn_log_read( 3698 struct file *f, 3699 char __user *buf, 3700 size_t size, 3701 loff_t *pos) 3702 { 3703 struct amdgpu_device *adev = file_inode(f)->i_private; 3704 struct dc *dc = adev->dm.dc; 3705 struct dc_log_buffer_ctx log_ctx = { 0 }; 3706 ssize_t result = 0; 3707 3708 if (!buf || !size) 3709 return -EINVAL; 3710 3711 if (!dc->hwss.log_hw_state) 3712 return 0; 3713 3714 dc->hwss.log_hw_state(dc, &log_ctx); 3715 3716 if (*pos < log_ctx.pos) { 3717 size_t to_copy = log_ctx.pos - *pos; 3718 3719 to_copy = min(to_copy, size); 3720 3721 if (!copy_to_user(buf, log_ctx.buf + *pos, to_copy)) { 3722 *pos += to_copy; 3723 result = to_copy; 3724 } 3725 } 3726 3727 kfree(log_ctx.buf); 3728 3729 return result; 3730 } 3731 3732 /* 3733 * Writes DTN log state to dmesg when triggered via a write. 3734 * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log 3735 */ 3736 static ssize_t dtn_log_write( 3737 struct file *f, 3738 const char __user *buf, 3739 size_t size, 3740 loff_t *pos) 3741 { 3742 struct amdgpu_device *adev = file_inode(f)->i_private; 3743 struct dc *dc = adev->dm.dc; 3744 3745 /* Write triggers log output via dmesg. 
*/ 3746 if (size == 0) 3747 return 0; 3748 3749 if (dc->hwss.log_hw_state) 3750 dc->hwss.log_hw_state(dc, NULL); 3751 3752 return size; 3753 } 3754 3755 static int mst_topo_show(struct seq_file *m, void *unused) 3756 { 3757 struct amdgpu_device *adev = (struct amdgpu_device *)m->private; 3758 struct drm_device *dev = adev_to_drm(adev); 3759 struct drm_connector *connector; 3760 struct drm_connector_list_iter conn_iter; 3761 struct amdgpu_dm_connector *aconnector; 3762 3763 drm_connector_list_iter_begin(dev, &conn_iter); 3764 drm_for_each_connector_iter(connector, &conn_iter) { 3765 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) 3766 continue; 3767 3768 aconnector = to_amdgpu_dm_connector(connector); 3769 3770 /* Ensure we're only dumping the topology of a root mst node */ 3771 if (!aconnector->mst_mgr.mst_state) 3772 continue; 3773 3774 seq_printf(m, "\nMST topology for connector %d\n", aconnector->connector_id); 3775 drm_dp_mst_dump_topology(m, &aconnector->mst_mgr); 3776 } 3777 drm_connector_list_iter_end(&conn_iter); 3778 3779 return 0; 3780 } 3781 3782 /* 3783 * Sets trigger hpd for MST topologies. 3784 * All connected connectors will be rediscovered and re started as needed if val of 1 is sent. 3785 * All topologies will be disconnected if val of 0 is set . 3786 * Usage to enable topologies: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_trigger_hpd_mst 3787 * Usage to disable topologies: echo 0 > /sys/kernel/debug/dri/0/amdgpu_dm_trigger_hpd_mst 3788 */ 3789 static int trigger_hpd_mst_set(void *data, u64 val) 3790 { 3791 struct amdgpu_device *adev = data; 3792 struct drm_device *dev = adev_to_drm(adev); 3793 struct drm_connector_list_iter iter; 3794 struct amdgpu_dm_connector *aconnector; 3795 struct drm_connector *connector; 3796 struct dc_link *link = NULL; 3797 3798 if (val == 1) { 3799 drm_connector_list_iter_begin(dev, &iter); 3800 drm_for_each_connector_iter(connector, &iter) { 3801 aconnector = to_amdgpu_dm_connector(connector); 3802 if (aconnector->dc_link->type == dc_connection_mst_branch && 3803 aconnector->mst_mgr.aux) { 3804 mutex_lock(&adev->dm.dc_lock); 3805 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); 3806 mutex_unlock(&adev->dm.dc_lock); 3807 3808 drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); 3809 } 3810 } 3811 } else if (val == 0) { 3812 drm_connector_list_iter_begin(dev, &iter); 3813 drm_for_each_connector_iter(connector, &iter) { 3814 aconnector = to_amdgpu_dm_connector(connector); 3815 if (!aconnector->dc_link) 3816 continue; 3817 3818 if (!aconnector->mst_root) 3819 continue; 3820 3821 link = aconnector->dc_link; 3822 dc_link_dp_receiver_power_ctrl(link, false); 3823 drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_root->mst_mgr, false); 3824 link->mst_stream_alloc_table.stream_count = 0; 3825 memset(link->mst_stream_alloc_table.stream_allocations, 0, 3826 sizeof(link->mst_stream_alloc_table.stream_allocations)); 3827 } 3828 } else { 3829 return 0; 3830 } 3831 drm_kms_helper_hotplug_event(dev); 3832 3833 return 0; 3834 } 3835 3836 /* 3837 * The interface doesn't need get function, so it will return the 3838 * value of zero 3839 * Usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_trigger_hpd_mst 3840 */ 3841 static int trigger_hpd_mst_get(void *data, u64 *val) 3842 { 3843 *val = 0; 3844 return 0; 3845 } 3846 3847 DEFINE_DEBUGFS_ATTRIBUTE(trigger_hpd_mst_ops, trigger_hpd_mst_get, 3848 trigger_hpd_mst_set, "%llu\n"); 3849 3850 3851 /* 3852 * Sets the force_timing_sync debug option from the given string. 
/*
 * Sets the force_timing_sync debug option from the given string.
 * All connected displays will be force synchronized immediately.
 * Usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_force_timing_sync
 */
static int force_timing_sync_set(void *data, u64 val)
{
	struct amdgpu_device *adev = data;

	adev->dm.force_timing_sync = (bool)val;

	amdgpu_dm_trigger_timing_sync(adev_to_drm(adev));

	return 0;
}

/*
 * Gets the force_timing_sync debug option value into the given buffer.
 * Usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_force_timing_sync
 */
static int force_timing_sync_get(void *data, u64 *val)
{
	struct amdgpu_device *adev = data;

	*val = adev->dm.force_timing_sync;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(force_timing_sync_ops, force_timing_sync_get,
			 force_timing_sync_set, "%llu\n");


/*
 * Disables all HPD and HPD RX interrupt handling in the
 * driver when set to 1. Default is 0.
 */
static int disable_hpd_set(void *data, u64 val)
{
	struct amdgpu_device *adev = data;

	adev->dm.disable_hpd_irq = (bool)val;

	return 0;
}


/*
 * Returns 1 if HPD and HPD RX interrupt handling is disabled,
 * 0 otherwise.
 */
static int disable_hpd_get(void *data, u64 *val)
{
	struct amdgpu_device *adev = data;

	*val = adev->dm.disable_hpd_irq;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(disable_hpd_ops, disable_hpd_get,
			 disable_hpd_set, "%llu\n");

/*
 * Prints hardware capabilities. These are used for IGT testing.
 */
static int capabilities_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	struct dc *dc = adev->dm.dc;
	bool mall_supported = dc->caps.mall_size_total;
	bool subvp_supported = dc->caps.subvp_fw_processing_delay_us;
	unsigned int mall_in_use = false;
	unsigned int subvp_in_use = false;

	struct hubbub *hubbub = dc->res_pool->hubbub;

	if (hubbub->funcs->get_mall_en)
		hubbub->funcs->get_mall_en(hubbub, &mall_in_use);

	if (dc->cap_funcs.get_subvp_en)
		subvp_in_use = dc->cap_funcs.get_subvp_en(dc, dc->current_state);

	seq_printf(m, "mall supported: %s, enabled: %s\n",
		   mall_supported ? "yes" : "no", mall_in_use ? "yes" : "no");
	seq_printf(m, "sub-viewport supported: %s, enabled: %s\n",
		   subvp_supported ? "yes" : "no", subvp_in_use ? "yes" : "no");

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(capabilities);

/*
 * Temporary workaround to force the SST sequence on the M42D DP2 MST receiver.
 * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dp_set_mst_en_for_sst
 */
static int dp_force_sst_set(void *data, u64 val)
{
	struct amdgpu_device *adev = data;

	adev->dm.dc->debug.set_mst_en_for_sst = val;

	return 0;
}

static int dp_force_sst_get(void *data, u64 *val)
{
	struct amdgpu_device *adev = data;

	*val = adev->dm.dc->debug.set_mst_en_for_sst;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(dp_set_mst_en_for_sst_ops, dp_force_sst_get,
			 dp_force_sst_set, "%llu\n");

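/*
 * For reference: DEFINE_SHOW_ATTRIBUTE(capabilities) above builds read-only
 * seq_file fops around capabilities_show(). Roughly (simplified sketch, not
 * the exact macro expansion):
 *
 *	static int capabilities_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, capabilities_show, inode->i_private);
 *	}
 *
 *	static const struct file_operations capabilities_fops = {
 *		.owner   = THIS_MODULE,
 *		.open    = capabilities_open,
 *		.read    = seq_read,
 *		.llseek  = seq_lseek,
 *		.release = single_release,
 *	};
 *
 * inode->i_private carries the struct amdgpu_device pointer passed to
 * debugfs_create_file(), which is how m->private reaches the show callback.
 */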
/*
 * Force DP2 sequence without a VESA certified cable.
 * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dp_ignore_cable_id
 */
static int dp_ignore_cable_id_set(void *data, u64 val)
{
	struct amdgpu_device *adev = data;

	adev->dm.dc->debug.ignore_cable_id = val;

	return 0;
}

static int dp_ignore_cable_id_get(void *data, u64 *val)
{
	struct amdgpu_device *adev = data;

	*val = adev->dm.dc->debug.ignore_cable_id;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(dp_ignore_cable_id_ops, dp_ignore_cable_id_get,
			 dp_ignore_cable_id_set, "%llu\n");

/*
 * Sets the DC visual confirm debug option from the given string.
 * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm
 */
static int visual_confirm_set(void *data, u64 val)
{
	struct amdgpu_device *adev = data;

	adev->dm.dc->debug.visual_confirm = (enum visual_confirm)val;

	return 0;
}

/*
 * Reads the DC visual confirm debug option value into the given buffer.
 * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm
 */
static int visual_confirm_get(void *data, u64 *val)
{
	struct amdgpu_device *adev = data;

	*val = adev->dm.dc->debug.visual_confirm;

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(mst_topo);
DEFINE_DEBUGFS_ATTRIBUTE(visual_confirm_fops, visual_confirm_get,
			 visual_confirm_set, "%llu\n");


/*
 * Sets the DC skip_detection_link_training debug option from the given string.
 * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_skip_detection_link_training
 */
static int skip_detection_link_training_set(void *data, u64 val)
{
	struct amdgpu_device *adev = data;

	if (val == 0)
		adev->dm.dc->debug.skip_detection_link_training = false;
	else
		adev->dm.dc->debug.skip_detection_link_training = true;

	return 0;
}

/*
 * Reads the DC skip_detection_link_training debug option value into the given buffer.
 * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_skip_detection_link_training
 */
static int skip_detection_link_training_get(void *data, u64 *val)
{
	struct amdgpu_device *adev = data;

	*val = adev->dm.dc->debug.skip_detection_link_training;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(skip_detection_link_training_fops,
			 skip_detection_link_training_get,
			 skip_detection_link_training_set, "%llu\n");

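/*
 * Illustrative userspace sketch (not part of the driver) for exercising the
 * simple get/set attributes above, using amdgpu_dm_visual_confirm as the
 * example. The path assumes DRM minor 0, the program is hypothetical and
 * needs root to access debugfs. It writes the value 1 and then reads it back
 * through a second open, since these attributes are not seekable:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char *path = "/sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm";
 *		char buf[32];
 *		ssize_t n;
 *		int fd;
 *
 *		fd = open(path, O_WRONLY);
 *		if (fd < 0 || write(fd, "1\n", 2) < 0)
 *			return 1;
 *		close(fd);
 *
 *		fd = open(path, O_RDONLY);
 *		if (fd < 0)
 *			return 1;
 *		n = read(fd, buf, sizeof(buf) - 1);
 *		if (n > 0) {
 *			buf[n] = '\0';
 *			printf("visual_confirm: %s", buf);
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */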
/*
 * Dumps the DCC_EN bit for each pipe.
 * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dcc_en
 */
static ssize_t dcc_en_bits_read(
	struct file *f,
	char __user *buf,
	size_t size,
	loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct dc *dc = adev->dm.dc;
	char *rd_buf = NULL;
	const uint32_t rd_buf_size = 32;
	uint32_t result = 0;
	int offset = 0;
	int num_pipes = dc->res_pool->pipe_count;
	int *dcc_en_bits;
	int i, r;

	dcc_en_bits = kcalloc(num_pipes, sizeof(int), GFP_KERNEL);
	if (!dcc_en_bits)
		return -ENOMEM;

	if (!dc->hwss.get_dcc_en_bits) {
		kfree(dcc_en_bits);
		return 0;
	}

	dc->hwss.get_dcc_en_bits(dc, dcc_en_bits);

	rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
	if (!rd_buf) {
		kfree(dcc_en_bits);
		return -ENOMEM;
	}

	for (i = 0; i < num_pipes; i++)
		offset += snprintf(rd_buf + offset, rd_buf_size - offset,
				   "%d ", dcc_en_bits[i]);
	rd_buf[strlen(rd_buf)] = '\n';

	kfree(dcc_en_bits);

	while (size) {
		if (*pos >= rd_buf_size)
			break;
		r = put_user(*(rd_buf + result), buf);
		if (r) {
			kfree(rd_buf);
			return r; /* r = -EFAULT */
		}
		buf += 1;
		size -= 1;
		*pos += 1;
		result += 1;
	}

	kfree(rd_buf);
	return result;
}

void dtn_debugfs_init(struct amdgpu_device *adev)
{
	static const struct file_operations dtn_log_fops = {
		.owner = THIS_MODULE,
		.read = dtn_log_read,
		.write = dtn_log_write,
		.llseek = default_llseek
	};
	static const struct file_operations dcc_en_bits_fops = {
		.owner = THIS_MODULE,
		.read = dcc_en_bits_read,
		.llseek = default_llseek
	};

	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_mst_topology", 0444, root,
			    adev, &mst_topo_fops);
	debugfs_create_file("amdgpu_dm_capabilities", 0444, root,
			    adev, &capabilities_fops);
	debugfs_create_file("amdgpu_dm_dtn_log", 0644, root, adev,
			    &dtn_log_fops);
	debugfs_create_file("amdgpu_dm_dp_set_mst_en_for_sst", 0644, root, adev,
			    &dp_set_mst_en_for_sst_ops);
	debugfs_create_file("amdgpu_dm_dp_ignore_cable_id", 0644, root, adev,
			    &dp_ignore_cable_id_ops);

	debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev,
				   &visual_confirm_fops);

	debugfs_create_file_unsafe("amdgpu_dm_skip_detection_link_training", 0644, root, adev,
				   &skip_detection_link_training_fops);

	debugfs_create_file_unsafe("amdgpu_dm_dmub_tracebuffer", 0644, root,
				   adev, &dmub_tracebuffer_fops);

	debugfs_create_file_unsafe("amdgpu_dm_dmub_fw_state", 0644, root,
				   adev, &dmub_fw_state_fops);

	debugfs_create_file_unsafe("amdgpu_dm_force_timing_sync", 0644, root,
				   adev, &force_timing_sync_ops);

	debugfs_create_file_unsafe("amdgpu_dm_dmub_trace_mask", 0644, root,
				   adev, &dmub_trace_mask_fops);

	debugfs_create_file_unsafe("amdgpu_dm_dmcub_trace_event_en", 0644, root,
				   adev, &dmcub_trace_event_state_fops);

	debugfs_create_file_unsafe("amdgpu_dm_trigger_hpd_mst", 0644, root,
				   adev, &trigger_hpd_mst_ops);

	debugfs_create_file_unsafe("amdgpu_dm_dcc_en", 0644, root, adev,
				   &dcc_en_bits_fops);

	debugfs_create_file_unsafe("amdgpu_dm_disable_hpd", 0644, root, adev,
				   &disable_hpd_ops);

	if (adev->dm.dc->caps.ips_support)
		debugfs_create_file_unsafe("amdgpu_dm_ips_status", 0644, root, adev,
					   &ips_status_fops);
}
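/*
 * Note: new DM-wide debugfs knobs in this file typically follow the same
 * three-step pattern: a get/set pair, a DEFINE_DEBUGFS_ATTRIBUTE()
 * definition, and a debugfs_create_file_unsafe() call in dtn_debugfs_init()
 * above. Hypothetical example only; "example_knob" and the debug field it
 * touches are placeholders, not an existing interface:
 *
 *	static int example_knob_set(void *data, u64 val)
 *	{
 *		struct amdgpu_device *adev = data;
 *
 *		adev->dm.dc->debug.example_knob = val;
 *		return 0;
 *	}
 *
 *	static int example_knob_get(void *data, u64 *val)
 *	{
 *		struct amdgpu_device *adev = data;
 *
 *		*val = adev->dm.dc->debug.example_knob;
 *		return 0;
 *	}
 *
 *	DEFINE_DEBUGFS_ATTRIBUTE(example_knob_fops, example_knob_get,
 *				 example_knob_set, "%llu\n");
 *
 * followed by debugfs_create_file_unsafe("amdgpu_dm_example_knob", 0644,
 * root, adev, &example_knob_fops) in dtn_debugfs_init().
 */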