/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/string.h>
#include <linux/acpi.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_pm.h"

/* Return a raw monotonic timestamp in nanoseconds. */
unsigned long long dm_get_timestamp(struct dc_context *ctx)
{
	struct timespec64 time;

	getrawmonotonic64(&time);
	return timespec64_to_ns(&time);
}

void dm_perf_trace_timestamp(const char *func_name, unsigned int line)
{
}

bool dm_write_persistent_data(struct dc_context *ctx,
			      const struct dc_sink *sink,
			      const char *module_name,
			      const char *key_name,
			      void *params,
			      unsigned int size,
			      struct persistent_data_flag *flag)
{
	/* TODO: implement */
	return false;
}

bool dm_read_persistent_data(struct dc_context *ctx,
			     const struct dc_sink *sink,
			     const char *module_name,
			     const char *key_name,
			     void *params,
			     unsigned int size,
			     struct persistent_data_flag *flag)
{
	/* TODO: implement */
	return false;
}

/**** power component interfaces ****/

/*
 * Translate DC's display requirements into amdgpu's powerplay display
 * configuration and notify powerplay so clocks can be adjusted accordingly.
 */
bool dm_pp_apply_display_requirements(
		const struct dc_context *ctx,
		const struct dm_pp_display_configuration *pp_display_cfg)
{
	struct amdgpu_device *adev = ctx->driver_context;

	if (adev->pm.dpm_enabled) {

		memset(&adev->pm.pm_display_cfg, 0,
				sizeof(adev->pm.pm_display_cfg));

		adev->pm.pm_display_cfg.cpu_cc6_disable =
			pp_display_cfg->cpu_cc6_disable;

		adev->pm.pm_display_cfg.cpu_pstate_disable =
			pp_display_cfg->cpu_pstate_disable;

		adev->pm.pm_display_cfg.cpu_pstate_separation_time =
			pp_display_cfg->cpu_pstate_separation_time;

		adev->pm.pm_display_cfg.nb_pstate_switch_disable =
			pp_display_cfg->nb_pstate_switch_disable;

		adev->pm.pm_display_cfg.num_display =
				pp_display_cfg->display_count;
		adev->pm.pm_display_cfg.num_path_including_non_display =
				pp_display_cfg->display_count;

		adev->pm.pm_display_cfg.min_core_set_clock =
				pp_display_cfg->min_engine_clock_khz/10;
		adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
				pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
		adev->pm.pm_display_cfg.min_mem_set_clock =
				pp_display_cfg->min_memory_clock_khz/10;

		adev->pm.pm_display_cfg.multi_monitor_in_sync =
				pp_display_cfg->all_displays_in_sync;
		adev->pm.pm_display_cfg.min_vblank_time =
				pp_display_cfg->avail_mclk_switch_time_us;

		adev->pm.pm_display_cfg.display_clk =
				pp_display_cfg->disp_clk_khz/10;

		adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
				pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;

		adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
		adev->pm.pm_display_cfg.line_time_in_us =
				pp_display_cfg->line_time_in_us;

		adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
		adev->pm.pm_display_cfg.crossfire_display_index = -1;
		adev->pm.pm_display_cfg.min_bus_bandwidth = 0;

		/* TODO: complete implementation of
		 * pp_display_configuration_change().
		 * Follow example of:
		 * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
		 * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
		if (adev->powerplay.pp_funcs->display_configuration_change)
			adev->powerplay.pp_funcs->display_configuration_change(
				adev->powerplay.pp_handle,
				&adev->pm.pm_display_cfg);

		/* TODO: replace by a separate call to 'apply display cfg'? */
		amdgpu_pm_compute_clocks(adev);
	}

	return true;
}

/* Hard-coded fallback clock levels used when pplib cannot provide them. */
static void get_default_clock_levels(
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels *clks)
{
	uint32_t disp_clks_in_khz[6] = {
			300000, 400000, 496560, 626090, 685720, 757900 };
	uint32_t sclks_in_khz[6] = {
			300000, 360000, 423530, 514290, 626090, 720000 };
	uint32_t mclks_in_khz[2] = { 333000, 800000 };

	switch (clk_type) {
	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
		clks->num_levels = 6;
		memmove(clks->clocks_in_khz, disp_clks_in_khz,
				sizeof(disp_clks_in_khz));
		break;
	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
		clks->num_levels = 6;
		memmove(clks->clocks_in_khz, sclks_in_khz,
				sizeof(sclks_in_khz));
		break;
	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
		clks->num_levels = 2;
		memmove(clks->clocks_in_khz, mclks_in_khz,
				sizeof(mclks_in_khz));
		break;
	default:
		clks->num_levels = 0;
		break;
	}
}

/* Translate a DM clock type into the corresponding pplib clock type. */
static enum amd_pp_clock_type dc_to_pp_clock_type(
		enum dm_pp_clock_type dm_pp_clk_type)
{
	enum amd_pp_clock_type amd_pp_clk_type = 0;

	switch (dm_pp_clk_type) {
	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
		amd_pp_clk_type = amd_pp_disp_clock;
		break;
	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
		amd_pp_clk_type = amd_pp_sys_clock;
		break;
	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
		amd_pp_clk_type = amd_pp_mem_clock;
		break;
	default:
		DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
				dm_pp_clk_type);
		break;
	}

	return amd_pp_clk_type;
}

/*
 * Copy clock levels reported by pplib into the DM structure, clamping to
 * DM_PP_MAX_CLOCK_LEVELS and converting from 10 kHz units to kHz.
 */
static void pp_to_dc_clock_levels(
		const struct amd_pp_clocks *pp_clks,
		struct dm_pp_clock_levels *dc_clks,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->count,
				DM_PP_MAX_CLOCK_LEVELS);

		dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else
		dc_clks->num_levels = pp_clks->count;

	DRM_INFO("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < dc_clks->num_levels; i++) {
		DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
		/* translate 10 kHz to kHz */
		dc_clks->clocks_in_khz[i] = pp_clks->clock[i] * 10;
	}
}

/*
 * Query clock levels of the given type from pplib (falling back to defaults
 * on error) and trim boosted levels that exceed the validation clocks.
 */
bool dm_pp_get_clock_levels_by_type(
		const struct dc_context *ctx,
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels *dc_clks)
{
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	struct amd_pp_clocks pp_clks = { 0 };
	struct amd_pp_simple_clock_info validation_clks = { 0 };
	uint32_t i;

	if (adev->powerplay.pp_funcs->get_clock_by_type) {
		if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
			dc_to_pp_clock_type(clk_type), &pp_clks)) {
			/* Error in pplib. Provide default values. */
			get_default_clock_levels(clk_type, dc_clks);
			return true;
		}
	}

	pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);

	if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
		if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
				pp_handle, &validation_clks)) {
			/* Error in pplib. Provide default values. */
			DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
			validation_clks.engine_max_clock = 72000;
			validation_clks.memory_max_clock = 80000;
			validation_clks.level = 0;
		}
	}

	DRM_INFO("DM_PPLIB: Validation clocks:\n");
	DRM_INFO("DM_PPLIB:    engine_max_clock: %d\n",
			validation_clks.engine_max_clock);
	DRM_INFO("DM_PPLIB:    memory_max_clock: %d\n",
			validation_clks.memory_max_clock);
	DRM_INFO("DM_PPLIB:    level           : %d\n",
			validation_clks.level);

	/* Translate 10 kHz to kHz. */
	validation_clks.engine_max_clock *= 10;
	validation_clks.memory_max_clock *= 10;

	/* Determine the highest non-boosted level from the validation clocks. */
	if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
				/* This clock is higher than the validation
				 * clock. That means the previous one is the
				 * highest non-boosted one. */
				DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	} else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
				DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ?
						i : 1;
				break;
			}
		}
	}

	return true;
}

bool dm_pp_get_clock_levels_by_type_with_latency(
	const struct dc_context *ctx,
	enum dm_pp_clock_type clk_type,
	struct dm_pp_clock_levels_with_latency *clk_level_info)
{
	/* TODO: to be implemented */
	return false;
}

bool dm_pp_get_clock_levels_by_type_with_voltage(
	const struct dc_context *ctx,
	enum dm_pp_clock_type clk_type,
	struct dm_pp_clock_levels_with_voltage *clk_level_info)
{
	/* TODO: to be implemented */
	return false;
}

bool dm_pp_notify_wm_clock_changes(
	const struct dc_context *ctx,
	struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
{
	/* TODO: to be implemented */
	return false;
}

bool dm_pp_apply_power_level_change_request(
	const struct dc_context *ctx,
	struct dm_pp_power_level_change_request *level_change_req)
{
	/* TODO: to be implemented */
	return false;
}

bool dm_pp_apply_clock_for_voltage_request(
	const struct dc_context *ctx,
	struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
{
	/* TODO: to be implemented */
	return false;
}

bool dm_pp_get_static_clocks(
	const struct dc_context *ctx,
	struct dm_pp_static_clock_info *static_clk_info)
{
	/* TODO: to be implemented */
	return false;
}

void dm_pp_get_funcs_rv(
		struct dc_context *ctx,
		struct pp_smu_funcs_rv *funcs)
{}

/**** end of power component interfaces ****/