1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright 2022 Advanced Micro Devices, Inc. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 * OTHER DEALINGS IN THE SOFTWARE. 22 * 23 * Authors: AMD 24 * 25 */ 26 27 #include <drm/drm_atomic_helper.h> 28 #include <drm/drm_blend.h> 29 #include <drm/drm_gem_atomic_helper.h> 30 #include <drm/drm_plane_helper.h> 31 #include <drm/drm_fourcc.h> 32 33 #include "amdgpu.h" 34 #include "dal_asic_id.h" 35 #include "amdgpu_display.h" 36 #include "amdgpu_dm_trace.h" 37 #include "amdgpu_dm_plane.h" 38 #include "gc/gc_11_0_0_offset.h" 39 #include "gc/gc_11_0_0_sh_mask.h" 40 41 /* 42 * TODO: these are currently initialized to rgb formats only. 43 * For future use cases we should either initialize them dynamically based on 44 * plane capabilities, or initialize this array to all formats, so internal drm 45 * check will succeed, and let DC implement proper check 46 */ 47 static const uint32_t rgb_formats[] = { 48 DRM_FORMAT_XRGB8888, 49 DRM_FORMAT_ARGB8888, 50 DRM_FORMAT_RGBA8888, 51 DRM_FORMAT_XRGB2101010, 52 DRM_FORMAT_XBGR2101010, 53 DRM_FORMAT_ARGB2101010, 54 DRM_FORMAT_ABGR2101010, 55 DRM_FORMAT_XRGB16161616, 56 DRM_FORMAT_XBGR16161616, 57 DRM_FORMAT_ARGB16161616, 58 DRM_FORMAT_ABGR16161616, 59 DRM_FORMAT_XBGR8888, 60 DRM_FORMAT_ABGR8888, 61 DRM_FORMAT_RGB565, 62 }; 63 64 static const uint32_t overlay_formats[] = { 65 DRM_FORMAT_XRGB8888, 66 DRM_FORMAT_ARGB8888, 67 DRM_FORMAT_RGBA8888, 68 DRM_FORMAT_XBGR8888, 69 DRM_FORMAT_ABGR8888, 70 DRM_FORMAT_RGB565, 71 DRM_FORMAT_NV21, 72 DRM_FORMAT_NV12, 73 DRM_FORMAT_P010 74 }; 75 76 static const uint32_t video_formats[] = { 77 DRM_FORMAT_NV21, 78 DRM_FORMAT_NV12, 79 DRM_FORMAT_P010 80 }; 81 82 static const u32 cursor_formats[] = { 83 DRM_FORMAT_ARGB8888 84 }; 85 86 enum dm_micro_swizzle { 87 MICRO_SWIZZLE_Z = 0, 88 MICRO_SWIZZLE_S = 1, 89 MICRO_SWIZZLE_D = 2, 90 MICRO_SWIZZLE_R = 3 91 }; 92 93 const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd) 94 { 95 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]); 96 } 97 98 void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state, 99 bool *per_pixel_alpha, bool *pre_multiplied_alpha, 100 bool *global_alpha, int *global_alpha_value) 101 { 102 *per_pixel_alpha = false; 103 *pre_multiplied_alpha = true; 104 *global_alpha = false; 105 *global_alpha_value = 0xff; 106 107 108 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI || 109 plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) { 110 static const uint32_t alpha_formats[] = { 111 DRM_FORMAT_ARGB8888, 112 DRM_FORMAT_RGBA8888, 113 DRM_FORMAT_ABGR8888, 114 DRM_FORMAT_ARGB2101010, 115 DRM_FORMAT_ABGR2101010, 116 DRM_FORMAT_ARGB16161616, 117 DRM_FORMAT_ABGR16161616, 118 DRM_FORMAT_ARGB16161616F, 119 }; 120 uint32_t format = plane_state->fb->format->format; 121 unsigned int i; 122 123 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) { 124 if (format == alpha_formats[i]) { 125 *per_pixel_alpha = true; 126 break; 127 } 128 } 129 130 if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) 131 *pre_multiplied_alpha = false; 132 } 133 134 if (plane_state->alpha < 0xffff) { 135 *global_alpha = true; 136 *global_alpha_value = plane_state->alpha >> 8; 137 } 138 } 139 140 static void amdgpu_dm_plane_add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod) 141 { 142 if (!*mods) 143 return; 144 145 if (*cap - *size < 1) { 146 uint64_t new_cap = *cap * 2; 147 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL); 148 149 if (!new_mods) { 150 kfree(*mods); 151 *mods = NULL; 152 return; 153 } 154 155 memcpy(new_mods, *mods, sizeof(uint64_t) * *size); 156 kfree(*mods); 157 *mods = new_mods; 158 *cap = new_cap; 159 } 160 161 (*mods)[*size] = mod; 162 *size += 1; 163 } 164 165 static bool amdgpu_dm_plane_modifier_has_dcc(uint64_t modifier) 166 { 167 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier); 168 } 169 170 static unsigned int amdgpu_dm_plane_modifier_gfx9_swizzle_mode(uint64_t modifier) 171 { 172 if (modifier == DRM_FORMAT_MOD_LINEAR) 173 return 0; 174 175 return AMD_FMT_MOD_GET(TILE, modifier); 176 } 177 178 static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info, 179 uint64_t tiling_flags) 180 { 181 /* Fill GFX8 params */ 182 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) { 183 unsigned int bankw, bankh, mtaspect, tile_split, num_banks; 184 185 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); 186 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); 187 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); 188 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); 189 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); 190 191 /* XXX fix me for VI */ 192 tiling_info->gfx8.num_banks = num_banks; 193 tiling_info->gfx8.array_mode = 194 DC_ARRAY_2D_TILED_THIN1; 195 tiling_info->gfx8.tile_split = tile_split; 196 tiling_info->gfx8.bank_width = bankw; 197 tiling_info->gfx8.bank_height = bankh; 198 tiling_info->gfx8.tile_aspect = mtaspect; 199 tiling_info->gfx8.tile_mode = 200 DC_ADDR_SURF_MICRO_TILING_DISPLAY; 201 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) 202 == DC_ARRAY_1D_TILED_THIN1) { 203 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1; 204 } 205 206 tiling_info->gfx8.pipe_config = 207 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); 208 } 209 210 static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, 211 union dc_tiling_info *tiling_info) 212 { 213 /* Fill GFX9 params */ 214 tiling_info->gfx9.num_pipes = 215 adev->gfx.config.gb_addr_config_fields.num_pipes; 216 tiling_info->gfx9.num_banks = 217 adev->gfx.config.gb_addr_config_fields.num_banks; 218 tiling_info->gfx9.pipe_interleave = 219 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size; 220 tiling_info->gfx9.num_shader_engines = 221 adev->gfx.config.gb_addr_config_fields.num_se; 222 tiling_info->gfx9.max_compressed_frags = 223 adev->gfx.config.gb_addr_config_fields.max_compress_frags; 224 tiling_info->gfx9.num_rb_per_se = 225 adev->gfx.config.gb_addr_config_fields.num_rb_per_se; 226 tiling_info->gfx9.shaderEnable = 1; 227 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) 228 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs; 229 } 230 231 static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev, 232 union dc_tiling_info *tiling_info, 233 uint64_t modifier) 234 { 235 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier); 236 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier); 237 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier); 238 unsigned int pipes_log2; 239 240 pipes_log2 = min(5u, mod_pipe_xor_bits); 241 242 amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info); 243 244 if (!IS_AMD_FMT_MOD(modifier)) 245 return; 246 247 tiling_info->gfx9.num_pipes = 1u << pipes_log2; 248 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2); 249 250 if (adev->family >= AMDGPU_FAMILY_NV) { 251 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2; 252 } else { 253 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits; 254 255 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */ 256 } 257 } 258 259 static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev, 260 const enum surface_pixel_format format, 261 const enum dc_rotation_angle rotation, 262 const union dc_tiling_info *tiling_info, 263 const struct dc_plane_dcc_param *dcc, 264 const struct dc_plane_address *address, 265 const struct plane_size *plane_size) 266 { 267 struct dc *dc = adev->dm.dc; 268 struct dc_dcc_surface_param input; 269 struct dc_surface_dcc_cap output; 270 271 memset(&input, 0, sizeof(input)); 272 memset(&output, 0, sizeof(output)); 273 274 if (!dcc->enable) 275 return 0; 276 277 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || 278 !dc->cap_funcs.get_dcc_compression_cap) 279 return -EINVAL; 280 281 input.format = format; 282 input.surface_size.width = plane_size->surface_size.width; 283 input.surface_size.height = plane_size->surface_size.height; 284 input.swizzle_mode = tiling_info->gfx9.swizzle; 285 286 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180) 287 input.scan = SCAN_DIRECTION_HORIZONTAL; 288 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270) 289 input.scan = SCAN_DIRECTION_VERTICAL; 290 291 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output)) 292 return -EINVAL; 293 294 if (!output.capable) 295 return -EINVAL; 296 297 if (dcc->independent_64b_blks == 0 && 298 output.grph.rgb.independent_64b_blks != 0) 299 return -EINVAL; 300 301 return 0; 302 } 303 304 static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, 305 const struct amdgpu_framebuffer *afb, 306 const enum surface_pixel_format format, 307 const enum dc_rotation_angle rotation, 308 const struct plane_size *plane_size, 309 union dc_tiling_info *tiling_info, 310 struct dc_plane_dcc_param *dcc, 311 struct dc_plane_address *address, 312 const bool force_disable_dcc) 313 { 314 const uint64_t modifier = afb->base.modifier; 315 int ret = 0; 316 317 amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier); 318 tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier); 319 320 if (amdgpu_dm_plane_modifier_has_dcc(modifier) && !force_disable_dcc) { 321 uint64_t dcc_address = afb->address + afb->base.offsets[1]; 322 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier); 323 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier); 324 325 dcc->enable = 1; 326 dcc->meta_pitch = afb->base.pitches[1]; 327 dcc->independent_64b_blks = independent_64b_blks; 328 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) { 329 if (independent_64b_blks && independent_128b_blks) 330 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl; 331 else if (independent_128b_blks) 332 dcc->dcc_ind_blk = hubp_ind_block_128b; 333 else if (independent_64b_blks && !independent_128b_blks) 334 dcc->dcc_ind_blk = hubp_ind_block_64b; 335 else 336 dcc->dcc_ind_blk = hubp_ind_block_unconstrained; 337 } else { 338 if (independent_64b_blks) 339 dcc->dcc_ind_blk = hubp_ind_block_64b; 340 else 341 dcc->dcc_ind_blk = hubp_ind_block_unconstrained; 342 } 343 344 address->grph.meta_addr.low_part = lower_32_bits(dcc_address); 345 address->grph.meta_addr.high_part = upper_32_bits(dcc_address); 346 } 347 348 ret = amdgpu_dm_plane_validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size); 349 if (ret) 350 drm_dbg_kms(adev_to_drm(adev), "amdgpu_dm_plane_validate_dcc: returned error: %d\n", ret); 351 352 return ret; 353 } 354 355 static int amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(struct amdgpu_device *adev, 356 const struct amdgpu_framebuffer *afb, 357 const enum surface_pixel_format format, 358 const enum dc_rotation_angle rotation, 359 const struct plane_size *plane_size, 360 union dc_tiling_info *tiling_info, 361 struct dc_plane_dcc_param *dcc, 362 struct dc_plane_address *address, 363 const bool force_disable_dcc) 364 { 365 const uint64_t modifier = afb->base.modifier; 366 int ret = 0; 367 368 /* TODO: Most of this function shouldn't be needed on GFX12. */ 369 amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info); 370 371 tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier); 372 373 if (amdgpu_dm_plane_modifier_has_dcc(modifier) && !force_disable_dcc) { 374 int max_compressed_block = AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier); 375 376 dcc->enable = 1; 377 dcc->independent_64b_blks = max_compressed_block == 0; 378 379 if (max_compressed_block == 0) 380 dcc->dcc_ind_blk = hubp_ind_block_64b; 381 else if (max_compressed_block == 1) 382 dcc->dcc_ind_blk = hubp_ind_block_128b; 383 else 384 dcc->dcc_ind_blk = hubp_ind_block_unconstrained; 385 } 386 387 /* TODO: This seems wrong because there is no DCC plane on GFX12. */ 388 ret = amdgpu_dm_plane_validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size); 389 if (ret) 390 drm_dbg_kms(adev_to_drm(adev), "amdgpu_dm_plane_validate_dcc: returned error: %d\n", ret); 391 392 return ret; 393 } 394 395 static void amdgpu_dm_plane_add_gfx10_1_modifiers(const struct amdgpu_device *adev, 396 uint64_t **mods, 397 uint64_t *size, 398 uint64_t *capacity) 399 { 400 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); 401 402 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 403 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 404 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 405 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 406 AMD_FMT_MOD_SET(DCC, 1) | 407 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 408 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 409 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 410 411 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 412 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 413 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 414 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 415 AMD_FMT_MOD_SET(DCC, 1) | 416 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 417 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 418 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 419 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 420 421 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 422 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 423 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 424 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); 425 426 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 427 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 428 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 429 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); 430 431 432 /* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */ 433 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 434 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | 435 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 436 437 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 438 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | 439 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 440 } 441 442 static void amdgpu_dm_plane_add_gfx9_modifiers(const struct amdgpu_device *adev, 443 uint64_t **mods, 444 uint64_t *size, 445 uint64_t *capacity) 446 { 447 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); 448 int pipe_xor_bits = min(8, pipes + 449 ilog2(adev->gfx.config.gb_addr_config_fields.num_se)); 450 int bank_xor_bits = min(8 - pipe_xor_bits, 451 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks)); 452 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) + 453 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se); 454 455 456 if (adev->family == AMDGPU_FAMILY_RV) { 457 /* Raven2 and later */ 458 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81; 459 460 /* 461 * No _D DCC swizzles yet because we only allow 32bpp, which 462 * doesn't support _D on DCN 463 */ 464 465 if (has_constant_encode) { 466 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 467 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 468 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 469 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 470 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 471 AMD_FMT_MOD_SET(DCC, 1) | 472 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 473 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 474 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1)); 475 } 476 477 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 478 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 479 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 480 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 481 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 482 AMD_FMT_MOD_SET(DCC, 1) | 483 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 484 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 485 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0)); 486 487 if (has_constant_encode) { 488 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 489 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 490 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 491 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 492 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 493 AMD_FMT_MOD_SET(DCC, 1) | 494 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 495 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 496 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 497 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 498 AMD_FMT_MOD_SET(RB, rb) | 499 AMD_FMT_MOD_SET(PIPE, pipes)); 500 } 501 502 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 503 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 504 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 505 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 506 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 507 AMD_FMT_MOD_SET(DCC, 1) | 508 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 509 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 510 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 511 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) | 512 AMD_FMT_MOD_SET(RB, rb) | 513 AMD_FMT_MOD_SET(PIPE, pipes)); 514 } 515 516 /* 517 * Only supported for 64bpp on Raven, will be filtered on format in 518 * amdgpu_dm_plane_format_mod_supported. 519 */ 520 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 521 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) | 522 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 523 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 524 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); 525 526 if (adev->family == AMDGPU_FAMILY_RV) { 527 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 528 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 529 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 530 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 531 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); 532 } 533 534 /* 535 * Only supported for 64bpp on Raven, will be filtered on format in 536 * amdgpu_dm_plane_format_mod_supported. 537 */ 538 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 539 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | 540 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 541 542 if (adev->family == AMDGPU_FAMILY_RV) { 543 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 544 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | 545 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 546 } 547 } 548 549 static void amdgpu_dm_plane_add_gfx10_3_modifiers(const struct amdgpu_device *adev, 550 uint64_t **mods, 551 uint64_t *size, 552 uint64_t *capacity) 553 { 554 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); 555 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs); 556 557 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 558 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 559 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 560 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 561 AMD_FMT_MOD_SET(PACKERS, pkrs) | 562 AMD_FMT_MOD_SET(DCC, 1) | 563 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 564 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 565 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 566 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 567 568 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 569 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 570 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 571 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 572 AMD_FMT_MOD_SET(PACKERS, pkrs) | 573 AMD_FMT_MOD_SET(DCC, 1) | 574 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 575 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 576 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); 577 578 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 579 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 580 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 581 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 582 AMD_FMT_MOD_SET(PACKERS, pkrs) | 583 AMD_FMT_MOD_SET(DCC, 1) | 584 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 585 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 586 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 587 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 588 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 589 590 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 591 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 592 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 593 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 594 AMD_FMT_MOD_SET(PACKERS, pkrs) | 595 AMD_FMT_MOD_SET(DCC, 1) | 596 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 597 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 598 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 599 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); 600 601 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 602 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 603 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 604 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 605 AMD_FMT_MOD_SET(PACKERS, pkrs)); 606 607 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 608 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 609 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 610 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 611 AMD_FMT_MOD_SET(PACKERS, pkrs)); 612 613 /* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */ 614 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 615 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | 616 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 617 618 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 619 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | 620 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 621 } 622 623 static void amdgpu_dm_plane_add_gfx11_modifiers(struct amdgpu_device *adev, 624 uint64_t **mods, uint64_t *size, uint64_t *capacity) 625 { 626 int num_pipes = 0; 627 int pipe_xor_bits = 0; 628 int num_pkrs = 0; 629 int pkrs = 0; 630 u32 gb_addr_config; 631 u8 i = 0; 632 unsigned int swizzle_r_x; 633 uint64_t modifier_r_x; 634 uint64_t modifier_dcc_best; 635 uint64_t modifier_dcc_4k; 636 637 /* TODO: GFX11 IP HW init hasnt finish and we get zero if we read from 638 * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes} 639 */ 640 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG); 641 ASSERT(gb_addr_config != 0); 642 643 num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS); 644 pkrs = ilog2(num_pkrs); 645 num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES); 646 pipe_xor_bits = ilog2(num_pipes); 647 648 for (i = 0; i < 2; i++) { 649 /* Insert the best one first. */ 650 /* R_X swizzle modes are the best for rendering and DCC requires them. */ 651 if (num_pipes > 16) 652 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X; 653 else 654 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X; 655 656 modifier_r_x = AMD_FMT_MOD | 657 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) | 658 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 659 AMD_FMT_MOD_SET(TILE, swizzle_r_x) | 660 AMD_FMT_MOD_SET(PACKERS, pkrs); 661 662 /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */ 663 modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) | 664 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) | 665 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 666 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B); 667 668 /* DCC settings for 4K and greater resolutions. (required by display hw) */ 669 modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) | 670 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 671 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 672 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B); 673 674 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best); 675 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k); 676 677 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1)); 678 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1)); 679 680 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_r_x); 681 } 682 683 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 684 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) | 685 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D)); 686 } 687 688 static void amdgpu_dm_plane_add_gfx12_modifiers(struct amdgpu_device *adev, 689 uint64_t **mods, uint64_t *size, uint64_t *capacity) 690 { 691 uint64_t ver = AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX12); 692 uint64_t mod_256k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256K_2D); 693 uint64_t mod_64k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_64K_2D); 694 uint64_t mod_4k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_4K_2D); 695 uint64_t mod_256b = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256B_2D); 696 uint64_t dcc = ver | AMD_FMT_MOD_SET(DCC, 1); 697 uint8_t max_comp_block[] = {1, 0}; 698 uint64_t max_comp_block_mod[ARRAY_SIZE(max_comp_block)] = {0}; 699 uint8_t i = 0, j = 0; 700 uint64_t gfx12_modifiers[] = {mod_256k, mod_64k, mod_4k, mod_256b, DRM_FORMAT_MOD_LINEAR}; 701 702 for (i = 0; i < ARRAY_SIZE(max_comp_block); i++) 703 max_comp_block_mod[i] = AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_comp_block[i]); 704 705 /* With DCC: Best choice should be kept first. Hence, add all 256k modifiers of different 706 * max compressed blocks first and then move on to the next smaller sized layouts. 707 * Do not add the linear modifier here, and hence the condition of size-1 for the loop 708 */ 709 for (j = 0; j < ARRAY_SIZE(gfx12_modifiers) - 1; j++) 710 for (i = 0; i < ARRAY_SIZE(max_comp_block); i++) 711 amdgpu_dm_plane_add_modifier(mods, size, capacity, 712 ver | dcc | max_comp_block_mod[i] | gfx12_modifiers[j]); 713 714 /* Without DCC. Add all modifiers including linear at the end */ 715 for (i = 0; i < ARRAY_SIZE(gfx12_modifiers); i++) 716 amdgpu_dm_plane_add_modifier(mods, size, capacity, gfx12_modifiers[i]); 717 718 } 719 720 static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods) 721 { 722 uint64_t size = 0, capacity = 128; 723 *mods = NULL; 724 725 /* We have not hooked up any pre-GFX9 modifiers. */ 726 if (adev->family < AMDGPU_FAMILY_AI) 727 return 0; 728 729 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL); 730 731 if (plane_type == DRM_PLANE_TYPE_CURSOR) { 732 amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); 733 amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); 734 return *mods ? 0 : -ENOMEM; 735 } 736 737 switch (adev->family) { 738 case AMDGPU_FAMILY_AI: 739 case AMDGPU_FAMILY_RV: 740 amdgpu_dm_plane_add_gfx9_modifiers(adev, mods, &size, &capacity); 741 break; 742 case AMDGPU_FAMILY_NV: 743 case AMDGPU_FAMILY_VGH: 744 case AMDGPU_FAMILY_YC: 745 case AMDGPU_FAMILY_GC_10_3_6: 746 case AMDGPU_FAMILY_GC_10_3_7: 747 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) 748 amdgpu_dm_plane_add_gfx10_3_modifiers(adev, mods, &size, &capacity); 749 else 750 amdgpu_dm_plane_add_gfx10_1_modifiers(adev, mods, &size, &capacity); 751 break; 752 case AMDGPU_FAMILY_GC_11_0_0: 753 case AMDGPU_FAMILY_GC_11_0_1: 754 case AMDGPU_FAMILY_GC_11_5_0: 755 amdgpu_dm_plane_add_gfx11_modifiers(adev, mods, &size, &capacity); 756 break; 757 case AMDGPU_FAMILY_GC_12_0_0: 758 amdgpu_dm_plane_add_gfx12_modifiers(adev, mods, &size, &capacity); 759 break; 760 } 761 762 amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); 763 764 /* INVALID marks the end of the list. */ 765 amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); 766 767 if (!*mods) 768 return -ENOMEM; 769 770 return 0; 771 } 772 773 static int amdgpu_dm_plane_get_plane_formats(const struct drm_plane *plane, 774 const struct dc_plane_cap *plane_cap, 775 uint32_t *formats, int max_formats) 776 { 777 int i, num_formats = 0; 778 779 /* 780 * TODO: Query support for each group of formats directly from 781 * DC plane caps. This will require adding more formats to the 782 * caps list. 783 */ 784 785 if (plane->type == DRM_PLANE_TYPE_PRIMARY || 786 (plane_cap && plane_cap->type == DC_PLANE_TYPE_DCN_UNIVERSAL && plane->type != DRM_PLANE_TYPE_CURSOR)) { 787 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) { 788 if (num_formats >= max_formats) 789 break; 790 791 formats[num_formats++] = rgb_formats[i]; 792 } 793 794 if (plane_cap && plane_cap->pixel_format_support.nv12) 795 formats[num_formats++] = DRM_FORMAT_NV12; 796 if (plane_cap && plane_cap->pixel_format_support.p010) 797 formats[num_formats++] = DRM_FORMAT_P010; 798 if (plane_cap && plane_cap->pixel_format_support.fp16) { 799 formats[num_formats++] = DRM_FORMAT_XRGB16161616F; 800 formats[num_formats++] = DRM_FORMAT_ARGB16161616F; 801 formats[num_formats++] = DRM_FORMAT_XBGR16161616F; 802 formats[num_formats++] = DRM_FORMAT_ABGR16161616F; 803 } 804 } else { 805 switch (plane->type) { 806 case DRM_PLANE_TYPE_OVERLAY: 807 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) { 808 if (num_formats >= max_formats) 809 break; 810 811 formats[num_formats++] = overlay_formats[i]; 812 } 813 break; 814 815 case DRM_PLANE_TYPE_CURSOR: 816 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) { 817 if (num_formats >= max_formats) 818 break; 819 820 formats[num_formats++] = cursor_formats[i]; 821 } 822 break; 823 824 default: 825 break; 826 } 827 } 828 829 return num_formats; 830 } 831 832 int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev, 833 const struct amdgpu_framebuffer *afb, 834 const enum surface_pixel_format format, 835 const enum dc_rotation_angle rotation, 836 const uint64_t tiling_flags, 837 union dc_tiling_info *tiling_info, 838 struct plane_size *plane_size, 839 struct dc_plane_dcc_param *dcc, 840 struct dc_plane_address *address, 841 bool tmz_surface, 842 bool force_disable_dcc) 843 { 844 const struct drm_framebuffer *fb = &afb->base; 845 int ret; 846 847 memset(tiling_info, 0, sizeof(*tiling_info)); 848 memset(plane_size, 0, sizeof(*plane_size)); 849 memset(dcc, 0, sizeof(*dcc)); 850 memset(address, 0, sizeof(*address)); 851 852 address->tmz_surface = tmz_surface; 853 854 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { 855 uint64_t addr = afb->address + fb->offsets[0]; 856 857 plane_size->surface_size.x = 0; 858 plane_size->surface_size.y = 0; 859 plane_size->surface_size.width = fb->width; 860 plane_size->surface_size.height = fb->height; 861 plane_size->surface_pitch = 862 fb->pitches[0] / fb->format->cpp[0]; 863 864 address->type = PLN_ADDR_TYPE_GRAPHICS; 865 address->grph.addr.low_part = lower_32_bits(addr); 866 address->grph.addr.high_part = upper_32_bits(addr); 867 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) { 868 uint64_t luma_addr = afb->address + fb->offsets[0]; 869 uint64_t chroma_addr = afb->address + fb->offsets[1]; 870 871 plane_size->surface_size.x = 0; 872 plane_size->surface_size.y = 0; 873 plane_size->surface_size.width = fb->width; 874 plane_size->surface_size.height = fb->height; 875 plane_size->surface_pitch = 876 fb->pitches[0] / fb->format->cpp[0]; 877 878 plane_size->chroma_size.x = 0; 879 plane_size->chroma_size.y = 0; 880 /* TODO: set these based on surface format */ 881 plane_size->chroma_size.width = fb->width / 2; 882 plane_size->chroma_size.height = fb->height / 2; 883 884 plane_size->chroma_pitch = 885 fb->pitches[1] / fb->format->cpp[1]; 886 887 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE; 888 address->video_progressive.luma_addr.low_part = 889 lower_32_bits(luma_addr); 890 address->video_progressive.luma_addr.high_part = 891 upper_32_bits(luma_addr); 892 address->video_progressive.chroma_addr.low_part = 893 lower_32_bits(chroma_addr); 894 address->video_progressive.chroma_addr.high_part = 895 upper_32_bits(chroma_addr); 896 } 897 898 if (adev->family >= AMDGPU_FAMILY_GC_12_0_0) { 899 ret = amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(adev, afb, format, 900 rotation, plane_size, 901 tiling_info, dcc, 902 address, 903 force_disable_dcc); 904 if (ret) 905 return ret; 906 } else if (adev->family >= AMDGPU_FAMILY_AI) { 907 ret = amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(adev, afb, format, 908 rotation, plane_size, 909 tiling_info, dcc, 910 address, 911 force_disable_dcc); 912 if (ret) 913 return ret; 914 } else { 915 amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags); 916 } 917 918 return 0; 919 } 920 921 static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane, 922 struct drm_plane_state *new_state) 923 { 924 struct amdgpu_framebuffer *afb; 925 struct drm_gem_object *obj; 926 struct amdgpu_device *adev; 927 struct amdgpu_bo *rbo; 928 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old; 929 uint32_t domain; 930 int r; 931 932 if (!new_state->fb) { 933 DRM_DEBUG_KMS("No FB bound\n"); 934 return 0; 935 } 936 937 afb = to_amdgpu_framebuffer(new_state->fb); 938 obj = new_state->fb->obj[0]; 939 rbo = gem_to_amdgpu_bo(obj); 940 adev = amdgpu_ttm_adev(rbo->tbo.bdev); 941 942 r = amdgpu_bo_reserve(rbo, true); 943 if (r) { 944 dev_err(adev->dev, "fail to reserve bo (%d)\n", r); 945 return r; 946 } 947 948 r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1); 949 if (r) { 950 dev_err(adev->dev, "reserving fence slot failed (%d)\n", r); 951 goto error_unlock; 952 } 953 954 if (plane->type != DRM_PLANE_TYPE_CURSOR) 955 domain = amdgpu_display_supported_domains(adev, rbo->flags); 956 else 957 domain = AMDGPU_GEM_DOMAIN_VRAM; 958 959 r = amdgpu_bo_pin(rbo, domain); 960 if (unlikely(r != 0)) { 961 if (r != -ERESTARTSYS) 962 DRM_ERROR("Failed to pin framebuffer with error %d\n", r); 963 goto error_unlock; 964 } 965 966 r = amdgpu_ttm_alloc_gart(&rbo->tbo); 967 if (unlikely(r != 0)) { 968 DRM_ERROR("%p bind failed\n", rbo); 969 goto error_unpin; 970 } 971 972 r = drm_gem_plane_helper_prepare_fb(plane, new_state); 973 if (unlikely(r != 0)) 974 goto error_unpin; 975 976 amdgpu_bo_unreserve(rbo); 977 978 afb->address = amdgpu_bo_gpu_offset(rbo); 979 980 amdgpu_bo_ref(rbo); 981 982 /** 983 * We don't do surface updates on planes that have been newly created, 984 * but we also don't have the afb->address during atomic check. 985 * 986 * Fill in buffer attributes depending on the address here, but only on 987 * newly created planes since they're not being used by DC yet and this 988 * won't modify global state. 989 */ 990 dm_plane_state_old = to_dm_plane_state(plane->state); 991 dm_plane_state_new = to_dm_plane_state(new_state); 992 993 if (dm_plane_state_new->dc_state && 994 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { 995 struct dc_plane_state *plane_state = 996 dm_plane_state_new->dc_state; 997 bool force_disable_dcc = !plane_state->dcc.enable; 998 999 amdgpu_dm_plane_fill_plane_buffer_attributes( 1000 adev, afb, plane_state->format, plane_state->rotation, 1001 afb->tiling_flags, 1002 &plane_state->tiling_info, &plane_state->plane_size, 1003 &plane_state->dcc, &plane_state->address, 1004 afb->tmz_surface, force_disable_dcc); 1005 } 1006 1007 return 0; 1008 1009 error_unpin: 1010 amdgpu_bo_unpin(rbo); 1011 1012 error_unlock: 1013 amdgpu_bo_unreserve(rbo); 1014 return r; 1015 } 1016 1017 static void amdgpu_dm_plane_helper_cleanup_fb(struct drm_plane *plane, 1018 struct drm_plane_state *old_state) 1019 { 1020 struct amdgpu_bo *rbo; 1021 int r; 1022 1023 if (!old_state->fb) 1024 return; 1025 1026 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]); 1027 r = amdgpu_bo_reserve(rbo, false); 1028 if (unlikely(r)) { 1029 DRM_ERROR("failed to reserve rbo before unpin\n"); 1030 return; 1031 } 1032 1033 amdgpu_bo_unpin(rbo); 1034 amdgpu_bo_unreserve(rbo); 1035 amdgpu_bo_unref(&rbo); 1036 } 1037 1038 static void amdgpu_dm_plane_get_min_max_dc_plane_scaling(struct drm_device *dev, 1039 struct drm_framebuffer *fb, 1040 int *min_downscale, int *max_upscale) 1041 { 1042 struct amdgpu_device *adev = drm_to_adev(dev); 1043 struct dc *dc = adev->dm.dc; 1044 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */ 1045 struct dc_plane_cap *plane_cap = &dc->caps.planes[0]; 1046 1047 switch (fb->format->format) { 1048 case DRM_FORMAT_P010: 1049 case DRM_FORMAT_NV12: 1050 case DRM_FORMAT_NV21: 1051 *max_upscale = plane_cap->max_upscale_factor.nv12; 1052 *min_downscale = plane_cap->max_downscale_factor.nv12; 1053 break; 1054 1055 case DRM_FORMAT_XRGB16161616F: 1056 case DRM_FORMAT_ARGB16161616F: 1057 case DRM_FORMAT_XBGR16161616F: 1058 case DRM_FORMAT_ABGR16161616F: 1059 *max_upscale = plane_cap->max_upscale_factor.fp16; 1060 *min_downscale = plane_cap->max_downscale_factor.fp16; 1061 break; 1062 1063 default: 1064 *max_upscale = plane_cap->max_upscale_factor.argb8888; 1065 *min_downscale = plane_cap->max_downscale_factor.argb8888; 1066 break; 1067 } 1068 1069 /* 1070 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a 1071 * scaling factor of 1.0 == 1000 units. 1072 */ 1073 if (*max_upscale == 1) 1074 *max_upscale = 1000; 1075 1076 if (*min_downscale == 1) 1077 *min_downscale = 1000; 1078 } 1079 1080 int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state, 1081 struct drm_crtc_state *new_crtc_state) 1082 { 1083 struct drm_framebuffer *fb = state->fb; 1084 int min_downscale, max_upscale; 1085 int min_scale = 0; 1086 int max_scale = INT_MAX; 1087 1088 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */ 1089 if (fb && state->crtc) { 1090 /* Validate viewport to cover the case when only the position changes */ 1091 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) { 1092 int viewport_width = state->crtc_w; 1093 int viewport_height = state->crtc_h; 1094 1095 if (state->crtc_x < 0) 1096 viewport_width += state->crtc_x; 1097 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay) 1098 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x; 1099 1100 if (state->crtc_y < 0) 1101 viewport_height += state->crtc_y; 1102 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay) 1103 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y; 1104 1105 if (viewport_width < 0 || viewport_height < 0) { 1106 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n"); 1107 return -EINVAL; 1108 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */ 1109 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2); 1110 return -EINVAL; 1111 } else if (viewport_height < MIN_VIEWPORT_SIZE) { 1112 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE); 1113 return -EINVAL; 1114 } 1115 1116 } 1117 1118 /* Get min/max allowed scaling factors from plane caps. */ 1119 amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->crtc->dev, fb, 1120 &min_downscale, &max_upscale); 1121 /* 1122 * Convert to drm convention: 16.16 fixed point, instead of dc's 1123 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's 1124 * dst/src, so min_scale = 1.0 / max_upscale, etc. 1125 */ 1126 min_scale = (1000 << 16) / max_upscale; 1127 max_scale = (1000 << 16) / min_downscale; 1128 } 1129 1130 return drm_atomic_helper_check_plane_state( 1131 state, new_crtc_state, min_scale, max_scale, true, true); 1132 } 1133 1134 int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev, 1135 const struct drm_plane_state *state, 1136 struct dc_scaling_info *scaling_info) 1137 { 1138 int scale_w, scale_h, min_downscale, max_upscale; 1139 1140 memset(scaling_info, 0, sizeof(*scaling_info)); 1141 1142 /* Source is fixed 16.16 but we ignore mantissa for now... */ 1143 scaling_info->src_rect.x = state->src_x >> 16; 1144 scaling_info->src_rect.y = state->src_y >> 16; 1145 1146 /* 1147 * For reasons we don't (yet) fully understand a non-zero 1148 * src_y coordinate into an NV12 buffer can cause a 1149 * system hang on DCN1x. 1150 * To avoid hangs (and maybe be overly cautious) 1151 * let's reject both non-zero src_x and src_y. 1152 * 1153 * We currently know of only one use-case to reproduce a 1154 * scenario with non-zero src_x and src_y for NV12, which 1155 * is to gesture the YouTube Android app into full screen 1156 * on ChromeOS. 1157 */ 1158 if (((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) || 1159 (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) && 1160 (state->fb && state->fb->format->format == DRM_FORMAT_NV12 && 1161 (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0))) 1162 return -EINVAL; 1163 1164 scaling_info->src_rect.width = state->src_w >> 16; 1165 if (scaling_info->src_rect.width == 0) 1166 return -EINVAL; 1167 1168 scaling_info->src_rect.height = state->src_h >> 16; 1169 if (scaling_info->src_rect.height == 0) 1170 return -EINVAL; 1171 1172 scaling_info->dst_rect.x = state->crtc_x; 1173 scaling_info->dst_rect.y = state->crtc_y; 1174 1175 if (state->crtc_w == 0) 1176 return -EINVAL; 1177 1178 scaling_info->dst_rect.width = state->crtc_w; 1179 1180 if (state->crtc_h == 0) 1181 return -EINVAL; 1182 1183 scaling_info->dst_rect.height = state->crtc_h; 1184 1185 /* DRM doesn't specify clipping on destination output. */ 1186 scaling_info->clip_rect = scaling_info->dst_rect; 1187 1188 /* Validate scaling per-format with DC plane caps */ 1189 if (state->plane && state->plane->dev && state->fb) { 1190 amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->plane->dev, state->fb, 1191 &min_downscale, &max_upscale); 1192 } else { 1193 min_downscale = 250; 1194 max_upscale = 16000; 1195 } 1196 1197 scale_w = scaling_info->dst_rect.width * 1000 / 1198 scaling_info->src_rect.width; 1199 1200 if (scale_w < min_downscale || scale_w > max_upscale) 1201 return -EINVAL; 1202 1203 scale_h = scaling_info->dst_rect.height * 1000 / 1204 scaling_info->src_rect.height; 1205 1206 if (scale_h < min_downscale || scale_h > max_upscale) 1207 return -EINVAL; 1208 1209 /* 1210 * The "scaling_quality" can be ignored for now, quality = 0 has DC 1211 * assume reasonable defaults based on the format. 1212 */ 1213 1214 return 0; 1215 } 1216 1217 static int amdgpu_dm_plane_atomic_check(struct drm_plane *plane, 1218 struct drm_atomic_state *state) 1219 { 1220 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, 1221 plane); 1222 struct amdgpu_device *adev = drm_to_adev(plane->dev); 1223 struct dc *dc = adev->dm.dc; 1224 struct dm_plane_state *dm_plane_state; 1225 struct dc_scaling_info scaling_info; 1226 struct drm_crtc_state *new_crtc_state; 1227 int ret; 1228 1229 trace_amdgpu_dm_plane_atomic_check(new_plane_state); 1230 1231 dm_plane_state = to_dm_plane_state(new_plane_state); 1232 1233 if (!dm_plane_state->dc_state) 1234 return 0; 1235 1236 new_crtc_state = 1237 drm_atomic_get_new_crtc_state(state, 1238 new_plane_state->crtc); 1239 if (!new_crtc_state) 1240 return -EINVAL; 1241 1242 ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state); 1243 if (ret) 1244 return ret; 1245 1246 ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, new_plane_state, &scaling_info); 1247 if (ret) 1248 return ret; 1249 1250 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK) 1251 return 0; 1252 1253 return -EINVAL; 1254 } 1255 1256 static int amdgpu_dm_plane_atomic_async_check(struct drm_plane *plane, 1257 struct drm_atomic_state *state) 1258 { 1259 struct drm_crtc_state *new_crtc_state; 1260 struct drm_plane_state *new_plane_state; 1261 struct dm_crtc_state *dm_new_crtc_state; 1262 1263 /* Only support async updates on cursor planes. */ 1264 if (plane->type != DRM_PLANE_TYPE_CURSOR) 1265 return -EINVAL; 1266 1267 new_plane_state = drm_atomic_get_new_plane_state(state, plane); 1268 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc); 1269 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 1270 /* Reject overlay cursors for now*/ 1271 if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE) 1272 return -EINVAL; 1273 1274 return 0; 1275 } 1276 1277 int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, 1278 struct dc_cursor_position *position) 1279 { 1280 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1281 int x, y; 1282 int xorigin = 0, yorigin = 0; 1283 1284 if (!crtc || !plane->state->fb) 1285 return 0; 1286 1287 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) || 1288 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) { 1289 DRM_ERROR("%s: bad cursor width or height %d x %d\n", 1290 __func__, 1291 plane->state->crtc_w, 1292 plane->state->crtc_h); 1293 return -EINVAL; 1294 } 1295 1296 x = plane->state->crtc_x; 1297 y = plane->state->crtc_y; 1298 1299 if (x <= -amdgpu_crtc->max_cursor_width || 1300 y <= -amdgpu_crtc->max_cursor_height) 1301 return 0; 1302 1303 if (x < 0) { 1304 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); 1305 x = 0; 1306 } 1307 if (y < 0) { 1308 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); 1309 y = 0; 1310 } 1311 position->enable = true; 1312 position->translate_by_source = true; 1313 position->x = x; 1314 position->y = y; 1315 position->x_hotspot = xorigin; 1316 position->y_hotspot = yorigin; 1317 1318 return 0; 1319 } 1320 1321 void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane, 1322 struct drm_plane_state *old_plane_state) 1323 { 1324 struct amdgpu_device *adev = drm_to_adev(plane->dev); 1325 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb); 1326 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc; 1327 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL; 1328 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1329 uint64_t address = afb ? afb->address : 0; 1330 struct dc_cursor_position position = {0}; 1331 struct dc_cursor_attributes attributes; 1332 int ret; 1333 1334 if (!plane->state->fb && !old_plane_state->fb) 1335 return; 1336 1337 drm_dbg_atomic(plane->dev, "crtc_id=%d with size %d to %d\n", 1338 amdgpu_crtc->crtc_id, plane->state->crtc_w, 1339 plane->state->crtc_h); 1340 1341 ret = amdgpu_dm_plane_get_cursor_position(plane, crtc, &position); 1342 if (ret) 1343 return; 1344 1345 if (!position.enable) { 1346 /* turn off cursor */ 1347 if (crtc_state && crtc_state->stream) { 1348 mutex_lock(&adev->dm.dc_lock); 1349 dc_stream_program_cursor_position(crtc_state->stream, 1350 &position); 1351 mutex_unlock(&adev->dm.dc_lock); 1352 } 1353 return; 1354 } 1355 1356 amdgpu_crtc->cursor_width = plane->state->crtc_w; 1357 amdgpu_crtc->cursor_height = plane->state->crtc_h; 1358 1359 memset(&attributes, 0, sizeof(attributes)); 1360 attributes.address.high_part = upper_32_bits(address); 1361 attributes.address.low_part = lower_32_bits(address); 1362 attributes.width = plane->state->crtc_w; 1363 attributes.height = plane->state->crtc_h; 1364 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA; 1365 attributes.rotation_angle = 0; 1366 attributes.attribute_flags.value = 0; 1367 1368 /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM 1369 * legacy gamma setup. 1370 */ 1371 if (crtc_state->cm_is_degamma_srgb && 1372 adev->dm.dc->caps.color.dpp.gamma_corr) 1373 attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1; 1374 1375 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; 1376 1377 if (crtc_state->stream) { 1378 mutex_lock(&adev->dm.dc_lock); 1379 if (!dc_stream_program_cursor_attributes(crtc_state->stream, 1380 &attributes)) 1381 DRM_ERROR("DC failed to set cursor attributes\n"); 1382 1383 if (!dc_stream_program_cursor_position(crtc_state->stream, 1384 &position)) 1385 DRM_ERROR("DC failed to set cursor position\n"); 1386 mutex_unlock(&adev->dm.dc_lock); 1387 } 1388 } 1389 1390 static void amdgpu_dm_plane_atomic_async_update(struct drm_plane *plane, 1391 struct drm_atomic_state *state) 1392 { 1393 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, 1394 plane); 1395 struct drm_plane_state *old_state = 1396 drm_atomic_get_old_plane_state(state, plane); 1397 1398 trace_amdgpu_dm_atomic_update_cursor(new_state); 1399 1400 swap(plane->state->fb, new_state->fb); 1401 1402 plane->state->src_x = new_state->src_x; 1403 plane->state->src_y = new_state->src_y; 1404 plane->state->src_w = new_state->src_w; 1405 plane->state->src_h = new_state->src_h; 1406 plane->state->crtc_x = new_state->crtc_x; 1407 plane->state->crtc_y = new_state->crtc_y; 1408 plane->state->crtc_w = new_state->crtc_w; 1409 plane->state->crtc_h = new_state->crtc_h; 1410 1411 amdgpu_dm_plane_handle_cursor_update(plane, old_state); 1412 } 1413 1414 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { 1415 .prepare_fb = amdgpu_dm_plane_helper_prepare_fb, 1416 .cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb, 1417 .atomic_check = amdgpu_dm_plane_atomic_check, 1418 .atomic_async_check = amdgpu_dm_plane_atomic_async_check, 1419 .atomic_async_update = amdgpu_dm_plane_atomic_async_update 1420 }; 1421 1422 static void amdgpu_dm_plane_drm_plane_reset(struct drm_plane *plane) 1423 { 1424 struct dm_plane_state *amdgpu_state = NULL; 1425 1426 if (plane->state) 1427 plane->funcs->atomic_destroy_state(plane, plane->state); 1428 1429 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL); 1430 WARN_ON(amdgpu_state == NULL); 1431 1432 if (!amdgpu_state) 1433 return; 1434 1435 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base); 1436 amdgpu_state->degamma_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; 1437 amdgpu_state->hdr_mult = AMDGPU_HDR_MULT_DEFAULT; 1438 amdgpu_state->shaper_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; 1439 amdgpu_state->blend_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; 1440 } 1441 1442 static struct drm_plane_state *amdgpu_dm_plane_drm_plane_duplicate_state(struct drm_plane *plane) 1443 { 1444 struct dm_plane_state *dm_plane_state, *old_dm_plane_state; 1445 1446 old_dm_plane_state = to_dm_plane_state(plane->state); 1447 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL); 1448 if (!dm_plane_state) 1449 return NULL; 1450 1451 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base); 1452 1453 if (old_dm_plane_state->dc_state) { 1454 dm_plane_state->dc_state = old_dm_plane_state->dc_state; 1455 dc_plane_state_retain(dm_plane_state->dc_state); 1456 } 1457 1458 if (old_dm_plane_state->degamma_lut) 1459 dm_plane_state->degamma_lut = 1460 drm_property_blob_get(old_dm_plane_state->degamma_lut); 1461 if (old_dm_plane_state->ctm) 1462 dm_plane_state->ctm = 1463 drm_property_blob_get(old_dm_plane_state->ctm); 1464 if (old_dm_plane_state->shaper_lut) 1465 dm_plane_state->shaper_lut = 1466 drm_property_blob_get(old_dm_plane_state->shaper_lut); 1467 if (old_dm_plane_state->lut3d) 1468 dm_plane_state->lut3d = 1469 drm_property_blob_get(old_dm_plane_state->lut3d); 1470 if (old_dm_plane_state->blend_lut) 1471 dm_plane_state->blend_lut = 1472 drm_property_blob_get(old_dm_plane_state->blend_lut); 1473 1474 dm_plane_state->degamma_tf = old_dm_plane_state->degamma_tf; 1475 dm_plane_state->hdr_mult = old_dm_plane_state->hdr_mult; 1476 dm_plane_state->shaper_tf = old_dm_plane_state->shaper_tf; 1477 dm_plane_state->blend_tf = old_dm_plane_state->blend_tf; 1478 1479 return &dm_plane_state->base; 1480 } 1481 1482 static bool amdgpu_dm_plane_format_mod_supported(struct drm_plane *plane, 1483 uint32_t format, 1484 uint64_t modifier) 1485 { 1486 struct amdgpu_device *adev = drm_to_adev(plane->dev); 1487 const struct drm_format_info *info = drm_format_info(format); 1488 int i; 1489 1490 if (!info) 1491 return false; 1492 1493 /* 1494 * We always have to allow these modifiers: 1495 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers. 1496 * 2. Not passing any modifiers is the same as explicitly passing INVALID. 1497 */ 1498 if (modifier == DRM_FORMAT_MOD_LINEAR || 1499 modifier == DRM_FORMAT_MOD_INVALID) { 1500 return true; 1501 } 1502 1503 /* Check that the modifier is on the list of the plane's supported modifiers. */ 1504 for (i = 0; i < plane->modifier_count; i++) { 1505 if (modifier == plane->modifiers[i]) 1506 break; 1507 } 1508 if (i == plane->modifier_count) 1509 return false; 1510 1511 /* GFX12 doesn't have these limitations. */ 1512 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) <= AMD_FMT_MOD_TILE_VER_GFX11) { 1513 enum dm_micro_swizzle microtile = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier) & 3; 1514 1515 /* 1516 * For D swizzle the canonical modifier depends on the bpp, so check 1517 * it here. 1518 */ 1519 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 && 1520 adev->family >= AMDGPU_FAMILY_NV) { 1521 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4) 1522 return false; 1523 } 1524 1525 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D && 1526 info->cpp[0] < 8) 1527 return false; 1528 1529 if (amdgpu_dm_plane_modifier_has_dcc(modifier)) { 1530 /* Per radeonsi comments 16/64 bpp are more complicated. */ 1531 if (info->cpp[0] != 4) 1532 return false; 1533 /* We support multi-planar formats, but not when combined with 1534 * additional DCC metadata planes. 1535 */ 1536 if (info->num_planes > 1) 1537 return false; 1538 } 1539 } 1540 1541 return true; 1542 } 1543 1544 static void amdgpu_dm_plane_drm_plane_destroy_state(struct drm_plane *plane, 1545 struct drm_plane_state *state) 1546 { 1547 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); 1548 1549 if (dm_plane_state->degamma_lut) 1550 drm_property_blob_put(dm_plane_state->degamma_lut); 1551 if (dm_plane_state->ctm) 1552 drm_property_blob_put(dm_plane_state->ctm); 1553 if (dm_plane_state->lut3d) 1554 drm_property_blob_put(dm_plane_state->lut3d); 1555 if (dm_plane_state->shaper_lut) 1556 drm_property_blob_put(dm_plane_state->shaper_lut); 1557 if (dm_plane_state->blend_lut) 1558 drm_property_blob_put(dm_plane_state->blend_lut); 1559 1560 if (dm_plane_state->dc_state) 1561 dc_plane_state_release(dm_plane_state->dc_state); 1562 1563 drm_atomic_helper_plane_destroy_state(plane, state); 1564 } 1565 1566 #ifdef AMD_PRIVATE_COLOR 1567 static void 1568 dm_atomic_plane_attach_color_mgmt_properties(struct amdgpu_display_manager *dm, 1569 struct drm_plane *plane) 1570 { 1571 struct amdgpu_mode_info mode_info = dm->adev->mode_info; 1572 struct dpp_color_caps dpp_color_caps = dm->dc->caps.color.dpp; 1573 1574 /* Check HW color pipeline capabilities on DPP block (pre-blending) 1575 * before exposing related properties. 1576 */ 1577 if (dpp_color_caps.dgam_ram || dpp_color_caps.gamma_corr) { 1578 drm_object_attach_property(&plane->base, 1579 mode_info.plane_degamma_lut_property, 1580 0); 1581 drm_object_attach_property(&plane->base, 1582 mode_info.plane_degamma_lut_size_property, 1583 MAX_COLOR_LUT_ENTRIES); 1584 drm_object_attach_property(&plane->base, 1585 dm->adev->mode_info.plane_degamma_tf_property, 1586 AMDGPU_TRANSFER_FUNCTION_DEFAULT); 1587 } 1588 /* HDR MULT is always available */ 1589 drm_object_attach_property(&plane->base, 1590 dm->adev->mode_info.plane_hdr_mult_property, 1591 AMDGPU_HDR_MULT_DEFAULT); 1592 1593 /* Only enable plane CTM if both DPP and MPC gamut remap is available. */ 1594 if (dm->dc->caps.color.mpc.gamut_remap) 1595 drm_object_attach_property(&plane->base, 1596 dm->adev->mode_info.plane_ctm_property, 0); 1597 1598 if (dpp_color_caps.hw_3d_lut) { 1599 drm_object_attach_property(&plane->base, 1600 mode_info.plane_shaper_lut_property, 0); 1601 drm_object_attach_property(&plane->base, 1602 mode_info.plane_shaper_lut_size_property, 1603 MAX_COLOR_LUT_ENTRIES); 1604 drm_object_attach_property(&plane->base, 1605 mode_info.plane_shaper_tf_property, 1606 AMDGPU_TRANSFER_FUNCTION_DEFAULT); 1607 drm_object_attach_property(&plane->base, 1608 mode_info.plane_lut3d_property, 0); 1609 drm_object_attach_property(&plane->base, 1610 mode_info.plane_lut3d_size_property, 1611 MAX_COLOR_3DLUT_SIZE); 1612 } 1613 1614 if (dpp_color_caps.ogam_ram) { 1615 drm_object_attach_property(&plane->base, 1616 mode_info.plane_blend_lut_property, 0); 1617 drm_object_attach_property(&plane->base, 1618 mode_info.plane_blend_lut_size_property, 1619 MAX_COLOR_LUT_ENTRIES); 1620 drm_object_attach_property(&plane->base, 1621 mode_info.plane_blend_tf_property, 1622 AMDGPU_TRANSFER_FUNCTION_DEFAULT); 1623 } 1624 } 1625 1626 static int 1627 dm_atomic_plane_set_property(struct drm_plane *plane, 1628 struct drm_plane_state *state, 1629 struct drm_property *property, 1630 uint64_t val) 1631 { 1632 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); 1633 struct amdgpu_device *adev = drm_to_adev(plane->dev); 1634 bool replaced = false; 1635 int ret; 1636 1637 if (property == adev->mode_info.plane_degamma_lut_property) { 1638 ret = drm_property_replace_blob_from_id(plane->dev, 1639 &dm_plane_state->degamma_lut, 1640 val, -1, 1641 sizeof(struct drm_color_lut), 1642 &replaced); 1643 dm_plane_state->base.color_mgmt_changed |= replaced; 1644 return ret; 1645 } else if (property == adev->mode_info.plane_degamma_tf_property) { 1646 if (dm_plane_state->degamma_tf != val) { 1647 dm_plane_state->degamma_tf = val; 1648 dm_plane_state->base.color_mgmt_changed = 1; 1649 } 1650 } else if (property == adev->mode_info.plane_hdr_mult_property) { 1651 if (dm_plane_state->hdr_mult != val) { 1652 dm_plane_state->hdr_mult = val; 1653 dm_plane_state->base.color_mgmt_changed = 1; 1654 } 1655 } else if (property == adev->mode_info.plane_ctm_property) { 1656 ret = drm_property_replace_blob_from_id(plane->dev, 1657 &dm_plane_state->ctm, 1658 val, 1659 sizeof(struct drm_color_ctm_3x4), -1, 1660 &replaced); 1661 dm_plane_state->base.color_mgmt_changed |= replaced; 1662 return ret; 1663 } else if (property == adev->mode_info.plane_shaper_lut_property) { 1664 ret = drm_property_replace_blob_from_id(plane->dev, 1665 &dm_plane_state->shaper_lut, 1666 val, -1, 1667 sizeof(struct drm_color_lut), 1668 &replaced); 1669 dm_plane_state->base.color_mgmt_changed |= replaced; 1670 return ret; 1671 } else if (property == adev->mode_info.plane_shaper_tf_property) { 1672 if (dm_plane_state->shaper_tf != val) { 1673 dm_plane_state->shaper_tf = val; 1674 dm_plane_state->base.color_mgmt_changed = 1; 1675 } 1676 } else if (property == adev->mode_info.plane_lut3d_property) { 1677 ret = drm_property_replace_blob_from_id(plane->dev, 1678 &dm_plane_state->lut3d, 1679 val, -1, 1680 sizeof(struct drm_color_lut), 1681 &replaced); 1682 dm_plane_state->base.color_mgmt_changed |= replaced; 1683 return ret; 1684 } else if (property == adev->mode_info.plane_blend_lut_property) { 1685 ret = drm_property_replace_blob_from_id(plane->dev, 1686 &dm_plane_state->blend_lut, 1687 val, -1, 1688 sizeof(struct drm_color_lut), 1689 &replaced); 1690 dm_plane_state->base.color_mgmt_changed |= replaced; 1691 return ret; 1692 } else if (property == adev->mode_info.plane_blend_tf_property) { 1693 if (dm_plane_state->blend_tf != val) { 1694 dm_plane_state->blend_tf = val; 1695 dm_plane_state->base.color_mgmt_changed = 1; 1696 } 1697 } else { 1698 drm_dbg_atomic(plane->dev, 1699 "[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n", 1700 plane->base.id, plane->name, 1701 property->base.id, property->name); 1702 return -EINVAL; 1703 } 1704 1705 return 0; 1706 } 1707 1708 static int 1709 dm_atomic_plane_get_property(struct drm_plane *plane, 1710 const struct drm_plane_state *state, 1711 struct drm_property *property, 1712 uint64_t *val) 1713 { 1714 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); 1715 struct amdgpu_device *adev = drm_to_adev(plane->dev); 1716 1717 if (property == adev->mode_info.plane_degamma_lut_property) { 1718 *val = (dm_plane_state->degamma_lut) ? 1719 dm_plane_state->degamma_lut->base.id : 0; 1720 } else if (property == adev->mode_info.plane_degamma_tf_property) { 1721 *val = dm_plane_state->degamma_tf; 1722 } else if (property == adev->mode_info.plane_hdr_mult_property) { 1723 *val = dm_plane_state->hdr_mult; 1724 } else if (property == adev->mode_info.plane_ctm_property) { 1725 *val = (dm_plane_state->ctm) ? 1726 dm_plane_state->ctm->base.id : 0; 1727 } else if (property == adev->mode_info.plane_shaper_lut_property) { 1728 *val = (dm_plane_state->shaper_lut) ? 1729 dm_plane_state->shaper_lut->base.id : 0; 1730 } else if (property == adev->mode_info.plane_shaper_tf_property) { 1731 *val = dm_plane_state->shaper_tf; 1732 } else if (property == adev->mode_info.plane_lut3d_property) { 1733 *val = (dm_plane_state->lut3d) ? 1734 dm_plane_state->lut3d->base.id : 0; 1735 } else if (property == adev->mode_info.plane_blend_lut_property) { 1736 *val = (dm_plane_state->blend_lut) ? 1737 dm_plane_state->blend_lut->base.id : 0; 1738 } else if (property == adev->mode_info.plane_blend_tf_property) { 1739 *val = dm_plane_state->blend_tf; 1740 1741 } else { 1742 return -EINVAL; 1743 } 1744 1745 return 0; 1746 } 1747 #endif 1748 1749 static const struct drm_plane_funcs dm_plane_funcs = { 1750 .update_plane = drm_atomic_helper_update_plane, 1751 .disable_plane = drm_atomic_helper_disable_plane, 1752 .destroy = drm_plane_helper_destroy, 1753 .reset = amdgpu_dm_plane_drm_plane_reset, 1754 .atomic_duplicate_state = amdgpu_dm_plane_drm_plane_duplicate_state, 1755 .atomic_destroy_state = amdgpu_dm_plane_drm_plane_destroy_state, 1756 .format_mod_supported = amdgpu_dm_plane_format_mod_supported, 1757 #ifdef AMD_PRIVATE_COLOR 1758 .atomic_set_property = dm_atomic_plane_set_property, 1759 .atomic_get_property = dm_atomic_plane_get_property, 1760 #endif 1761 }; 1762 1763 int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, 1764 struct drm_plane *plane, 1765 unsigned long possible_crtcs, 1766 const struct dc_plane_cap *plane_cap) 1767 { 1768 uint32_t formats[32]; 1769 int num_formats; 1770 int res = -EPERM; 1771 unsigned int supported_rotations; 1772 uint64_t *modifiers = NULL; 1773 unsigned int primary_zpos = dm->dc->caps.max_slave_planes; 1774 1775 num_formats = amdgpu_dm_plane_get_plane_formats(plane, plane_cap, formats, 1776 ARRAY_SIZE(formats)); 1777 1778 res = amdgpu_dm_plane_get_plane_modifiers(dm->adev, plane->type, &modifiers); 1779 if (res) 1780 return res; 1781 1782 if (modifiers == NULL) 1783 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true; 1784 1785 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs, 1786 &dm_plane_funcs, formats, num_formats, 1787 modifiers, plane->type, NULL); 1788 kfree(modifiers); 1789 if (res) 1790 return res; 1791 1792 if (plane->type == DRM_PLANE_TYPE_OVERLAY && 1793 plane_cap && plane_cap->per_pixel_alpha) { 1794 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) | 1795 BIT(DRM_MODE_BLEND_PREMULTI) | 1796 BIT(DRM_MODE_BLEND_COVERAGE); 1797 1798 drm_plane_create_alpha_property(plane); 1799 drm_plane_create_blend_mode_property(plane, blend_caps); 1800 } 1801 1802 if (plane->type == DRM_PLANE_TYPE_PRIMARY) { 1803 /* 1804 * Allow OVERLAY planes to be used as underlays by assigning an 1805 * immutable zpos = # of OVERLAY planes to the PRIMARY plane. 1806 */ 1807 drm_plane_create_zpos_immutable_property(plane, primary_zpos); 1808 } else if (plane->type == DRM_PLANE_TYPE_OVERLAY) { 1809 /* 1810 * OVERLAY planes can be below or above the PRIMARY, but cannot 1811 * be above the CURSOR plane. 1812 */ 1813 unsigned int zpos = primary_zpos + 1 + drm_plane_index(plane); 1814 1815 drm_plane_create_zpos_property(plane, zpos, 0, 254); 1816 } else if (plane->type == DRM_PLANE_TYPE_CURSOR) { 1817 drm_plane_create_zpos_immutable_property(plane, 255); 1818 } 1819 1820 if (plane->type == DRM_PLANE_TYPE_PRIMARY && 1821 plane_cap && 1822 (plane_cap->pixel_format_support.nv12 || 1823 plane_cap->pixel_format_support.p010)) { 1824 /* This only affects YUV formats. */ 1825 drm_plane_create_color_properties( 1826 plane, 1827 BIT(DRM_COLOR_YCBCR_BT601) | 1828 BIT(DRM_COLOR_YCBCR_BT709) | 1829 BIT(DRM_COLOR_YCBCR_BT2020), 1830 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | 1831 BIT(DRM_COLOR_YCBCR_FULL_RANGE), 1832 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE); 1833 } 1834 1835 supported_rotations = 1836 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | 1837 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270; 1838 1839 if (dm->adev->asic_type >= CHIP_BONAIRE && 1840 plane->type != DRM_PLANE_TYPE_CURSOR) 1841 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, 1842 supported_rotations); 1843 1844 if (amdgpu_ip_version(dm->adev, DCE_HWIP, 0) > IP_VERSION(3, 0, 1) && 1845 plane->type != DRM_PLANE_TYPE_CURSOR) 1846 drm_plane_enable_fb_damage_clips(plane); 1847 1848 drm_plane_helper_add(plane, &dm_plane_helper_funcs); 1849 1850 #ifdef AMD_PRIVATE_COLOR 1851 dm_atomic_plane_attach_color_mgmt_properties(dm, plane); 1852 #endif 1853 /* Create (reset) the plane state */ 1854 if (plane->funcs->reset) 1855 plane->funcs->reset(plane); 1856 1857 return 0; 1858 } 1859 1860 bool amdgpu_dm_plane_is_video_format(uint32_t format) 1861 { 1862 int i; 1863 1864 for (i = 0; i < ARRAY_SIZE(video_formats); i++) 1865 if (format == video_formats[i]) 1866 return true; 1867 1868 return false; 1869 } 1870 1871