/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/string.h>
#include <linux/acpi.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_pm.h"
#include "dm_pp_smu.h"
#include "../../powerplay/inc/hwmgr.h"
#include "../../powerplay/hwmgr/smu10_hwmgr.h"

unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
		unsigned long long current_time_stamp,
		unsigned long long last_time_stamp)
{
	return current_time_stamp - last_time_stamp;
}

void dm_perf_trace_timestamp(const char *func_name, unsigned int line)
{
}

bool dm_write_persistent_data(struct dc_context *ctx,
		const struct dc_sink *sink,
		const char *module_name,
		const char *key_name,
		void *params,
		unsigned int size,
		struct persistent_data_flag *flag)
{
	/* TODO: implement */
	return false;
}

bool dm_read_persistent_data(struct dc_context *ctx,
		const struct dc_sink *sink,
		const char *module_name,
		const char *key_name,
		void *params,
		unsigned int size,
		struct persistent_data_flag *flag)
{
	/* TODO: implement */
	return false;
}

/**** power component interfaces ****/
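/*
 * Translate DC's display configuration into amdgpu's pm_display_cfg and
 * hand it to powerplay. DC reports clocks in kHz while powerplay expects
 * 10 kHz units, hence the divisions by 10 below.
 */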
bool dm_pp_apply_display_requirements(
		const struct dc_context *ctx,
		const struct dm_pp_display_configuration *pp_display_cfg)
{
	struct amdgpu_device *adev = ctx->driver_context;

	if (adev->pm.dpm_enabled) {

		memset(&adev->pm.pm_display_cfg, 0,
				sizeof(adev->pm.pm_display_cfg));

		adev->pm.pm_display_cfg.cpu_cc6_disable =
			pp_display_cfg->cpu_cc6_disable;

		adev->pm.pm_display_cfg.cpu_pstate_disable =
			pp_display_cfg->cpu_pstate_disable;

		adev->pm.pm_display_cfg.cpu_pstate_separation_time =
			pp_display_cfg->cpu_pstate_separation_time;

		adev->pm.pm_display_cfg.nb_pstate_switch_disable =
			pp_display_cfg->nb_pstate_switch_disable;

		adev->pm.pm_display_cfg.num_display =
				pp_display_cfg->display_count;
		adev->pm.pm_display_cfg.num_path_including_non_display =
				pp_display_cfg->display_count;

		adev->pm.pm_display_cfg.min_core_set_clock =
				pp_display_cfg->min_engine_clock_khz/10;
		adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
				pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
		adev->pm.pm_display_cfg.min_mem_set_clock =
				pp_display_cfg->min_memory_clock_khz/10;

		adev->pm.pm_display_cfg.multi_monitor_in_sync =
				pp_display_cfg->all_displays_in_sync;
		adev->pm.pm_display_cfg.min_vblank_time =
				pp_display_cfg->avail_mclk_switch_time_us;

		adev->pm.pm_display_cfg.display_clk =
				pp_display_cfg->disp_clk_khz/10;

		adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
				pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;

		adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
		adev->pm.pm_display_cfg.line_time_in_us =
				pp_display_cfg->line_time_in_us;

		adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
		adev->pm.pm_display_cfg.crossfire_display_index = -1;
		adev->pm.pm_display_cfg.min_bus_bandwidth = 0;

		/* TODO: complete implementation of
		 * pp_display_configuration_change().
		 * Follow example of:
		 * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
		 * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c
		 */
		if (adev->powerplay.pp_funcs->display_configuration_change)
			adev->powerplay.pp_funcs->display_configuration_change(
				adev->powerplay.pp_handle,
				&adev->pm.pm_display_cfg);

		/* TODO: replace by a separate call to 'apply display cfg'? */
		amdgpu_pm_compute_clocks(adev);
	}

	return true;
}

static void get_default_clock_levels(
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels *clks)
{
	uint32_t disp_clks_in_khz[6] = {
			300000, 400000, 496560, 626090, 685720, 757900 };
	uint32_t sclks_in_khz[6] = {
			300000, 360000, 423530, 514290, 626090, 720000 };
	uint32_t mclks_in_khz[2] = { 333000, 800000 };

	switch (clk_type) {
	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
		clks->num_levels = 6;
		memmove(clks->clocks_in_khz, disp_clks_in_khz,
				sizeof(disp_clks_in_khz));
		break;
	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
		clks->num_levels = 6;
		memmove(clks->clocks_in_khz, sclks_in_khz,
				sizeof(sclks_in_khz));
		break;
	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
		clks->num_levels = 2;
		memmove(clks->clocks_in_khz, mclks_in_khz,
				sizeof(mclks_in_khz));
		break;
	default:
		clks->num_levels = 0;
		break;
	}
}

static enum amd_pp_clock_type dc_to_pp_clock_type(
		enum dm_pp_clock_type dm_pp_clk_type)
{
	enum amd_pp_clock_type amd_pp_clk_type = 0;

	switch (dm_pp_clk_type) {
	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
		amd_pp_clk_type = amd_pp_disp_clock;
		break;
	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
		amd_pp_clk_type = amd_pp_sys_clock;
		break;
	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
		amd_pp_clk_type = amd_pp_mem_clock;
		break;
	case DM_PP_CLOCK_TYPE_DCEFCLK:
		amd_pp_clk_type = amd_pp_dcef_clock;
		break;
	case DM_PP_CLOCK_TYPE_DCFCLK:
		amd_pp_clk_type = amd_pp_dcf_clock;
		break;
	case DM_PP_CLOCK_TYPE_PIXELCLK:
		amd_pp_clk_type = amd_pp_pixel_clock;
		break;
	case DM_PP_CLOCK_TYPE_FCLK:
		amd_pp_clk_type = amd_pp_f_clock;
		break;
	case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
		amd_pp_clk_type = amd_pp_dpp_clock;
		break;
	default:
		DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
				dm_pp_clk_type);
		break;
	}

	return amd_pp_clk_type;
}
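/*
 * The pp_to_dc_clock_levels*() helpers below copy clock levels reported by
 * powerplay into DC's structures, clamping the level count to
 * DM_PP_MAX_CLOCK_LEVELS and converting powerplay's 10 kHz units to kHz.
 */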
static void pp_to_dc_clock_levels(
		const struct amd_pp_clocks *pp_clks,
		struct dm_pp_clock_levels *dc_clks,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->count,
				DM_PP_MAX_CLOCK_LEVELS);

		dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else
		dc_clks->num_levels = pp_clks->count;

	DRM_INFO("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < dc_clks->num_levels; i++) {
		DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
		/* translate 10kHz to kHz */
		dc_clks->clocks_in_khz[i] = pp_clks->clock[i] * 10;
	}
}

static void pp_to_dc_clock_levels_with_latency(
		const struct pp_clock_levels_with_latency *pp_clks,
		struct dm_pp_clock_levels_with_latency *clk_level_info,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->num_levels,
				DM_PP_MAX_CLOCK_LEVELS);

		clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else
		clk_level_info->num_levels = pp_clks->num_levels;

	DRM_DEBUG("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < clk_level_info->num_levels; i++) {
		DRM_DEBUG("DM_PPLIB:\t %d in 10kHz\n", pp_clks->data[i].clocks_in_khz);
		/* translate 10kHz to kHz */
		clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz * 10;
		clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
	}
}

static void pp_to_dc_clock_levels_with_voltage(
		const struct pp_clock_levels_with_voltage *pp_clks,
		struct dm_pp_clock_levels_with_voltage *clk_level_info,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->num_levels,
				DM_PP_MAX_CLOCK_LEVELS);

		clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else
		clk_level_info->num_levels = pp_clks->num_levels;

	DRM_INFO("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < clk_level_info->num_levels; i++) {
		DRM_INFO("DM_PPLIB:\t %d in 10kHz\n", pp_clks->data[i].clocks_in_khz);
		/* translate 10kHz to kHz */
		clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz * 10;
		clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
	}
}
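/*
 * Query powerplay for the clock levels of the given type. If powerplay
 * fails, fall back to hard-coded defaults; for engine and memory clocks,
 * additionally drop boosted levels that exceed the display mode validation
 * clocks.
 */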
bool dm_pp_get_clock_levels_by_type(
		const struct dc_context *ctx,
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels *dc_clks)
{
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	struct amd_pp_clocks pp_clks = { 0 };
	struct amd_pp_simple_clock_info validation_clks = { 0 };
	uint32_t i;

	if (adev->powerplay.pp_funcs->get_clock_by_type) {
		if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
			dc_to_pp_clock_type(clk_type), &pp_clks)) {
			/* Error in pplib. Provide default values. */
			get_default_clock_levels(clk_type, dc_clks);
			return true;
		}
	}

	pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);

	if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
		if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
				pp_handle, &validation_clks)) {
			/* Error in pplib. Provide default values. */
			DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
			validation_clks.engine_max_clock = 72000;
			validation_clks.memory_max_clock = 80000;
			validation_clks.level = 0;
		}
	}

	DRM_INFO("DM_PPLIB: Validation clocks:\n");
	DRM_INFO("DM_PPLIB:    engine_max_clock: %d\n",
			validation_clks.engine_max_clock);
	DRM_INFO("DM_PPLIB:    memory_max_clock: %d\n",
			validation_clks.memory_max_clock);
	DRM_INFO("DM_PPLIB:    level           : %d\n",
			validation_clks.level);

	/* Translate 10 kHz to kHz. */
	validation_clks.engine_max_clock *= 10;
	validation_clks.memory_max_clock *= 10;

	/* Determine the highest non-boosted level from the Validation Clocks */
	if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
				/* This clock is higher than the validation
				 * clock. That means the previous one is the
				 * highest non-boosted one. */
				DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	} else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
				DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	}

	return true;
}

bool dm_pp_get_clock_levels_by_type_with_latency(
	const struct dc_context *ctx,
	enum dm_pp_clock_type clk_type,
	struct dm_pp_clock_levels_with_latency *clk_level_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	struct pp_clock_levels_with_latency pp_clks = { 0 };
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency)
		return false;

	if (pp_funcs->get_clock_by_type_with_latency(pp_handle,
			dc_to_pp_clock_type(clk_type),
			&pp_clks))
		return false;

	pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);

	return true;
}

bool dm_pp_get_clock_levels_by_type_with_voltage(
	const struct dc_context *ctx,
	enum dm_pp_clock_type clk_type,
	struct dm_pp_clock_levels_with_voltage *clk_level_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	struct pp_clock_levels_with_voltage pp_clk_info = {0};
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	/* Guard against a missing callback, as the latency variant does. */
	if (!pp_funcs || !pp_funcs->get_clock_by_type_with_voltage)
		return false;

	if (pp_funcs->get_clock_by_type_with_voltage(pp_handle,
			dc_to_pp_clock_type(clk_type),
			&pp_clk_info))
		return false;

	pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);

	return true;
}

bool dm_pp_notify_wm_clock_changes(
	const struct dc_context *ctx,
	struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
{
	/* TODO: to be implemented */
	return false;
}

bool dm_pp_apply_power_level_change_request(
	const struct dc_context *ctx,
	struct dm_pp_power_level_change_request *level_change_req)
{
	/* TODO: to be implemented */
	return false;
}
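/*
 * Forward a DC clock-for-voltage request to powerplay. Returns false when
 * the clock type cannot be translated or powerplay rejects the request.
 */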
bool dm_pp_apply_clock_for_voltage_request(
	const struct dc_context *ctx,
	struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_display_clock_request pp_clock_request = {0};
	int ret = 0;

	pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type);
	pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;

	if (!pp_clock_request.clock_type)
		return false;

	if (adev->powerplay.pp_funcs->display_clock_voltage_request)
		ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
			adev->powerplay.pp_handle,
			&pp_clock_request);
	if (ret)
		return false;
	return true;
}

bool dm_pp_get_static_clocks(
	const struct dc_context *ctx,
	struct dm_pp_static_clock_info *static_clk_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct amd_pp_clock_info pp_clk_info = {0};
	int ret = 0;

	if (adev->powerplay.pp_funcs->get_current_clocks)
		ret = adev->powerplay.pp_funcs->get_current_clocks(
			adev->powerplay.pp_handle,
			&pp_clk_info);
	if (ret)
		return false;

	static_clk_info->max_clocks_state = pp_clk_info.max_clocks_state;
	/* translate 10kHz to kHz */
	static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
	static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;

	return true;
}
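/*
 * The pp_rv_*() functions below implement the Raven (SMU10) pp_smu
 * interface. They bypass the public powerplay API and call into the hwmgr
 * directly.
 */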
void pp_rv_set_display_requirement(struct pp_smu *pp,
		struct pp_smu_display_requirement_rv *req)
{
	struct amdgpu_device *adev = pp->ctx->driver_context;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	if (hwmgr->hwmgr_func->set_deep_sleep_dcefclk)
		ret = hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, req->hard_min_dcefclk_khz/10);
	if (hwmgr->hwmgr_func->set_active_display_count)
		ret = hwmgr->hwmgr_func->set_active_display_count(hwmgr, req->display_count);

	/* store_cc6 is not yet implemented at the SMU level */
}

void pp_rv_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	struct amdgpu_device *adev = pp->ctx->driver_context;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	struct pp_wm_sets_with_clock_ranges_soc15 ranges_soc15 = {0};
	int i = 0;

	if (!hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges ||
			!pp || !ranges)
		return;

	/* not entirely sure if this is the correct assignment */
	ranges_soc15.num_wm_sets_dmif = ranges->num_reader_wm_sets;
	ranges_soc15.num_wm_sets_mcif = ranges->num_writer_wm_sets;

	for (i = 0; i < ranges_soc15.num_wm_sets_dmif; i++) {
		if (ranges->reader_wm_sets[i].wm_inst > 3)
			ranges_soc15.wm_sets_dmif[i].wm_set_id = DC_WM_SET_A;
		else
			ranges_soc15.wm_sets_dmif[i].wm_set_id =
					ranges->reader_wm_sets[i].wm_inst;
		ranges_soc15.wm_sets_dmif[i].wm_max_dcefclk_in_khz =
				ranges->reader_wm_sets[i].max_drain_clk_khz;
		ranges_soc15.wm_sets_dmif[i].wm_min_dcefclk_in_khz =
				ranges->reader_wm_sets[i].min_drain_clk_khz;
		ranges_soc15.wm_sets_dmif[i].wm_max_memclk_in_khz =
				ranges->reader_wm_sets[i].max_fill_clk_khz;
		ranges_soc15.wm_sets_dmif[i].wm_min_memclk_in_khz =
				ranges->reader_wm_sets[i].min_fill_clk_khz;
	}

	for (i = 0; i < ranges_soc15.num_wm_sets_mcif; i++) {
		if (ranges->writer_wm_sets[i].wm_inst > 3)
			ranges_soc15.wm_sets_mcif[i].wm_set_id = DC_WM_SET_A;
		else
			ranges_soc15.wm_sets_mcif[i].wm_set_id =
					ranges->writer_wm_sets[i].wm_inst;
		ranges_soc15.wm_sets_mcif[i].wm_max_socclk_in_khz =
				ranges->writer_wm_sets[i].max_fill_clk_khz;
		ranges_soc15.wm_sets_mcif[i].wm_min_socclk_in_khz =
				ranges->writer_wm_sets[i].min_fill_clk_khz;
		ranges_soc15.wm_sets_mcif[i].wm_max_memclk_in_khz =
				ranges->writer_wm_sets[i].max_fill_clk_khz;
		ranges_soc15.wm_sets_mcif[i].wm_min_memclk_in_khz =
				ranges->writer_wm_sets[i].min_fill_clk_khz;
	}

	hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr, &ranges_soc15);
}

void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
{
	struct amdgpu_device *adev = pp->ctx->driver_context;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (hwmgr->hwmgr_func->smus_notify_pwe)
		hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
}

void dm_pp_get_funcs_rv(
		struct dc_context *ctx,
		struct pp_smu_funcs_rv *funcs)
{
	funcs->pp_smu.ctx = ctx;
	funcs->set_display_requirement = pp_rv_set_display_requirement;
	funcs->set_wm_ranges = pp_rv_set_wm_ranges;
	funcs->set_pme_wa_enable = pp_rv_set_pme_wa_enable;
}

/**** end of power component interfaces ****/