1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright 2022 Advanced Micro Devices, Inc. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 * OTHER DEALINGS IN THE SOFTWARE. 22 * 23 * Authors: AMD 24 * 25 */ 26 27 #include <drm/drm_atomic_helper.h> 28 #include <drm/drm_blend.h> 29 #include "drm/drm_framebuffer.h" 30 #include <drm/drm_gem_atomic_helper.h> 31 #include <drm/drm_plane_helper.h> 32 #include <drm/drm_gem_framebuffer_helper.h> 33 #include <drm/drm_fourcc.h> 34 35 #include "amdgpu.h" 36 #include "dal_asic_id.h" 37 #include "amdgpu_display.h" 38 #include "amdgpu_dm_trace.h" 39 #include "amdgpu_dm_plane.h" 40 #include "gc/gc_11_0_0_offset.h" 41 #include "gc/gc_11_0_0_sh_mask.h" 42 43 /* 44 * TODO: these are currently initialized to rgb formats only. 45 * For future use cases we should either initialize them dynamically based on 46 * plane capabilities, or initialize this array to all formats, so internal drm 47 * check will succeed, and let DC implement proper check 48 */ 49 static const uint32_t rgb_formats[] = { 50 DRM_FORMAT_XRGB8888, 51 DRM_FORMAT_ARGB8888, 52 DRM_FORMAT_RGBA8888, 53 DRM_FORMAT_XRGB2101010, 54 DRM_FORMAT_XBGR2101010, 55 DRM_FORMAT_ARGB2101010, 56 DRM_FORMAT_ABGR2101010, 57 DRM_FORMAT_XRGB16161616, 58 DRM_FORMAT_XBGR16161616, 59 DRM_FORMAT_ARGB16161616, 60 DRM_FORMAT_ABGR16161616, 61 DRM_FORMAT_XBGR8888, 62 DRM_FORMAT_ABGR8888, 63 DRM_FORMAT_RGB565, 64 }; 65 66 static const uint32_t overlay_formats[] = { 67 DRM_FORMAT_XRGB8888, 68 DRM_FORMAT_ARGB8888, 69 DRM_FORMAT_RGBA8888, 70 DRM_FORMAT_XBGR8888, 71 DRM_FORMAT_ABGR8888, 72 DRM_FORMAT_RGB565, 73 DRM_FORMAT_NV21, 74 DRM_FORMAT_NV12, 75 DRM_FORMAT_P010 76 }; 77 78 static const uint32_t video_formats[] = { 79 DRM_FORMAT_NV21, 80 DRM_FORMAT_NV12, 81 DRM_FORMAT_P010 82 }; 83 84 static const u32 cursor_formats[] = { 85 DRM_FORMAT_ARGB8888 86 }; 87 88 enum dm_micro_swizzle { 89 MICRO_SWIZZLE_Z = 0, 90 MICRO_SWIZZLE_S = 1, 91 MICRO_SWIZZLE_D = 2, 92 MICRO_SWIZZLE_R = 3 93 }; 94 95 const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd) 96 { 97 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]); 98 } 99 100 void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state, 101 bool *per_pixel_alpha, bool *pre_multiplied_alpha, 102 bool *global_alpha, int *global_alpha_value) 103 { 104 *per_pixel_alpha = false; 105 *pre_multiplied_alpha = true; 106 *global_alpha = false; 107 *global_alpha_value = 0xff; 108 109 110 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI || 111 plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) { 112 static const uint32_t alpha_formats[] = { 113 DRM_FORMAT_ARGB8888, 114 DRM_FORMAT_RGBA8888, 115 DRM_FORMAT_ABGR8888, 116 DRM_FORMAT_ARGB2101010, 117 DRM_FORMAT_ABGR2101010, 118 DRM_FORMAT_ARGB16161616, 119 DRM_FORMAT_ABGR16161616, 120 DRM_FORMAT_ARGB16161616F, 121 }; 122 uint32_t format = plane_state->fb->format->format; 123 unsigned int i; 124 125 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) { 126 if (format == alpha_formats[i]) { 127 *per_pixel_alpha = true; 128 break; 129 } 130 } 131 132 if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) 133 *pre_multiplied_alpha = false; 134 } 135 136 if (plane_state->alpha < 0xffff) { 137 *global_alpha = true; 138 *global_alpha_value = plane_state->alpha >> 8; 139 } 140 } 141 142 static void amdgpu_dm_plane_add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod) 143 { 144 if (!*mods) 145 return; 146 147 if (*cap - *size < 1) { 148 uint64_t new_cap = *cap * 2; 149 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL); 150 151 if (!new_mods) { 152 kfree(*mods); 153 *mods = NULL; 154 return; 155 } 156 157 memcpy(new_mods, *mods, sizeof(uint64_t) * *size); 158 kfree(*mods); 159 *mods = new_mods; 160 *cap = new_cap; 161 } 162 163 (*mods)[*size] = mod; 164 *size += 1; 165 } 166 167 static bool amdgpu_dm_plane_modifier_has_dcc(uint64_t modifier) 168 { 169 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier); 170 } 171 172 static unsigned int amdgpu_dm_plane_modifier_gfx9_swizzle_mode(uint64_t modifier) 173 { 174 if (modifier == DRM_FORMAT_MOD_LINEAR) 175 return 0; 176 177 return AMD_FMT_MOD_GET(TILE, modifier); 178 } 179 180 static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(struct dc_tiling_info *tiling_info, 181 uint64_t tiling_flags) 182 { 183 /* Fill GFX8 params */ 184 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) { 185 unsigned int bankw, bankh, mtaspect, tile_split, num_banks; 186 187 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); 188 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); 189 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); 190 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); 191 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); 192 193 tiling_info->gfxversion = DcGfxVersion8; 194 /* XXX fix me for VI */ 195 tiling_info->gfx8.num_banks = num_banks; 196 tiling_info->gfx8.array_mode = 197 DC_ARRAY_2D_TILED_THIN1; 198 tiling_info->gfx8.tile_split = tile_split; 199 tiling_info->gfx8.bank_width = bankw; 200 tiling_info->gfx8.bank_height = bankh; 201 tiling_info->gfx8.tile_aspect = mtaspect; 202 tiling_info->gfx8.tile_mode = 203 DC_ADDR_SURF_MICRO_TILING_DISPLAY; 204 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) 205 == DC_ARRAY_1D_TILED_THIN1) { 206 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1; 207 } 208 209 tiling_info->gfx8.pipe_config = 210 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); 211 } 212 213 static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, 214 struct dc_tiling_info *tiling_info) 215 { 216 /* Fill GFX9 params */ 217 tiling_info->gfx9.num_pipes = 218 adev->gfx.config.gb_addr_config_fields.num_pipes; 219 tiling_info->gfx9.num_banks = 220 adev->gfx.config.gb_addr_config_fields.num_banks; 221 tiling_info->gfx9.pipe_interleave = 222 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size; 223 tiling_info->gfx9.num_shader_engines = 224 adev->gfx.config.gb_addr_config_fields.num_se; 225 tiling_info->gfx9.max_compressed_frags = 226 adev->gfx.config.gb_addr_config_fields.max_compress_frags; 227 tiling_info->gfx9.num_rb_per_se = 228 adev->gfx.config.gb_addr_config_fields.num_rb_per_se; 229 tiling_info->gfx9.shaderEnable = 1; 230 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) 231 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs; 232 } 233 234 static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev, 235 struct dc_tiling_info *tiling_info, 236 uint64_t modifier) 237 { 238 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier); 239 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier); 240 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier); 241 unsigned int pipes_log2; 242 243 pipes_log2 = min(5u, mod_pipe_xor_bits); 244 245 amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info); 246 247 if (!IS_AMD_FMT_MOD(modifier)) 248 return; 249 250 tiling_info->gfx9.num_pipes = 1u << pipes_log2; 251 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2); 252 253 if (adev->family >= AMDGPU_FAMILY_NV) { 254 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2; 255 } else { 256 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits; 257 258 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */ 259 } 260 } 261 262 static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev, 263 const enum surface_pixel_format format, 264 const enum dc_rotation_angle rotation, 265 const struct dc_tiling_info *tiling_info, 266 const struct dc_plane_dcc_param *dcc, 267 const struct dc_plane_address *address, 268 const struct plane_size *plane_size) 269 { 270 struct dc *dc = adev->dm.dc; 271 struct dc_dcc_surface_param input; 272 struct dc_surface_dcc_cap output; 273 274 memset(&input, 0, sizeof(input)); 275 memset(&output, 0, sizeof(output)); 276 277 if (!dcc->enable) 278 return 0; 279 280 if (adev->family < AMDGPU_FAMILY_GC_12_0_0 && 281 format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) 282 return -EINVAL; 283 284 if (!dc->cap_funcs.get_dcc_compression_cap) 285 return -EINVAL; 286 287 input.format = format; 288 input.surface_size.width = plane_size->surface_size.width; 289 input.surface_size.height = plane_size->surface_size.height; 290 input.swizzle_mode = tiling_info->gfx9.swizzle; 291 292 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180) 293 input.scan = SCAN_DIRECTION_HORIZONTAL; 294 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270) 295 input.scan = SCAN_DIRECTION_VERTICAL; 296 297 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output)) 298 return -EINVAL; 299 300 if (!output.capable) 301 return -EINVAL; 302 303 if (dcc->independent_64b_blks == 0 && 304 output.grph.rgb.independent_64b_blks != 0) 305 return -EINVAL; 306 307 return 0; 308 } 309 310 static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, 311 const struct amdgpu_framebuffer *afb, 312 const enum surface_pixel_format format, 313 const enum dc_rotation_angle rotation, 314 const struct plane_size *plane_size, 315 struct dc_tiling_info *tiling_info, 316 struct dc_plane_dcc_param *dcc, 317 struct dc_plane_address *address) 318 { 319 const uint64_t modifier = afb->base.modifier; 320 int ret = 0; 321 322 amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier); 323 tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier); 324 tiling_info->gfxversion = DcGfxVersion9; 325 326 if (amdgpu_dm_plane_modifier_has_dcc(modifier)) { 327 uint64_t dcc_address = afb->address + afb->base.offsets[1]; 328 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier); 329 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier); 330 331 dcc->enable = 1; 332 dcc->meta_pitch = afb->base.pitches[1]; 333 dcc->independent_64b_blks = independent_64b_blks; 334 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) { 335 if (independent_64b_blks && independent_128b_blks) 336 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl; 337 else if (independent_128b_blks) 338 dcc->dcc_ind_blk = hubp_ind_block_128b; 339 else if (independent_64b_blks && !independent_128b_blks) 340 dcc->dcc_ind_blk = hubp_ind_block_64b; 341 else 342 dcc->dcc_ind_blk = hubp_ind_block_unconstrained; 343 } else { 344 if (independent_64b_blks) 345 dcc->dcc_ind_blk = hubp_ind_block_64b; 346 else 347 dcc->dcc_ind_blk = hubp_ind_block_unconstrained; 348 } 349 350 address->grph.meta_addr.low_part = lower_32_bits(dcc_address); 351 address->grph.meta_addr.high_part = upper_32_bits(dcc_address); 352 } 353 354 ret = amdgpu_dm_plane_validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size); 355 if (ret) 356 drm_dbg_kms(adev_to_drm(adev), "amdgpu_dm_plane_validate_dcc: returned error: %d\n", ret); 357 358 return ret; 359 } 360 361 static int amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(struct amdgpu_device *adev, 362 const struct amdgpu_framebuffer *afb, 363 const enum surface_pixel_format format, 364 const enum dc_rotation_angle rotation, 365 const struct plane_size *plane_size, 366 struct dc_tiling_info *tiling_info, 367 struct dc_plane_dcc_param *dcc, 368 struct dc_plane_address *address) 369 { 370 const uint64_t modifier = afb->base.modifier; 371 int ret = 0; 372 373 /* TODO: Most of this function shouldn't be needed on GFX12. */ 374 amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info); 375 376 tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier); 377 tiling_info->gfxversion = DcGfxAddr3; 378 379 if (amdgpu_dm_plane_modifier_has_dcc(modifier)) { 380 int max_compressed_block = AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier); 381 382 dcc->enable = 1; 383 dcc->independent_64b_blks = max_compressed_block == 0; 384 385 if (max_compressed_block == 0) 386 dcc->dcc_ind_blk = hubp_ind_block_64b; 387 else if (max_compressed_block == 1) 388 dcc->dcc_ind_blk = hubp_ind_block_128b; 389 else 390 dcc->dcc_ind_blk = hubp_ind_block_unconstrained; 391 } 392 393 /* TODO: This seems wrong because there is no DCC plane on GFX12. */ 394 ret = amdgpu_dm_plane_validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size); 395 if (ret) 396 drm_dbg_kms(adev_to_drm(adev), "amdgpu_dm_plane_validate_dcc: returned error: %d\n", ret); 397 398 return ret; 399 } 400 401 static void amdgpu_dm_plane_add_gfx10_1_modifiers(const struct amdgpu_device *adev, 402 uint64_t **mods, 403 uint64_t *size, 404 uint64_t *capacity) 405 { 406 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); 407 408 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 409 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 410 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 411 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 412 AMD_FMT_MOD_SET(DCC, 1) | 413 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 414 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 415 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 416 417 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 418 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 419 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 420 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 421 AMD_FMT_MOD_SET(DCC, 1) | 422 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 423 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 424 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 425 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 426 427 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 428 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 429 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 430 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); 431 432 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 433 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 434 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 435 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); 436 437 438 /* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */ 439 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 440 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | 441 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 442 443 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 444 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | 445 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 446 } 447 448 static void amdgpu_dm_plane_add_gfx9_modifiers(const struct amdgpu_device *adev, 449 uint64_t **mods, 450 uint64_t *size, 451 uint64_t *capacity) 452 { 453 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); 454 int pipe_xor_bits = min(8, pipes + 455 ilog2(adev->gfx.config.gb_addr_config_fields.num_se)); 456 int bank_xor_bits = min(8 - pipe_xor_bits, 457 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks)); 458 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) + 459 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se); 460 461 462 if (adev->family == AMDGPU_FAMILY_RV) { 463 /* Raven2 and later */ 464 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81; 465 466 /* 467 * No _D DCC swizzles yet because we only allow 32bpp, which 468 * doesn't support _D on DCN 469 */ 470 471 if (has_constant_encode) { 472 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 473 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 474 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 475 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 476 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 477 AMD_FMT_MOD_SET(DCC, 1) | 478 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 479 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 480 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1)); 481 } 482 483 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 484 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 485 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 486 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 487 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 488 AMD_FMT_MOD_SET(DCC, 1) | 489 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 490 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 491 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0)); 492 493 if (has_constant_encode) { 494 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 495 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 496 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 497 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 498 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 499 AMD_FMT_MOD_SET(DCC, 1) | 500 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 501 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 502 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 503 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 504 AMD_FMT_MOD_SET(RB, rb) | 505 AMD_FMT_MOD_SET(PIPE, pipes)); 506 } 507 508 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 509 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 510 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 511 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 512 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 513 AMD_FMT_MOD_SET(DCC, 1) | 514 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 515 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 516 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 517 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) | 518 AMD_FMT_MOD_SET(RB, rb) | 519 AMD_FMT_MOD_SET(PIPE, pipes)); 520 } 521 522 /* 523 * Only supported for 64bpp on Raven, will be filtered on format in 524 * amdgpu_dm_plane_format_mod_supported. 525 */ 526 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 527 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) | 528 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 529 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 530 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); 531 532 if (adev->family == AMDGPU_FAMILY_RV) { 533 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 534 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 535 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 536 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 537 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); 538 } 539 540 /* 541 * Only supported for 64bpp on Raven, will be filtered on format in 542 * amdgpu_dm_plane_format_mod_supported. 543 */ 544 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 545 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | 546 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 547 548 if (adev->family == AMDGPU_FAMILY_RV) { 549 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 550 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | 551 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 552 } 553 } 554 555 static void amdgpu_dm_plane_add_gfx10_3_modifiers(const struct amdgpu_device *adev, 556 uint64_t **mods, 557 uint64_t *size, 558 uint64_t *capacity) 559 { 560 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); 561 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs); 562 563 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 564 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 565 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 566 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 567 AMD_FMT_MOD_SET(PACKERS, pkrs) | 568 AMD_FMT_MOD_SET(DCC, 1) | 569 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 570 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 571 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 572 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 573 574 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 575 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 576 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 577 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 578 AMD_FMT_MOD_SET(PACKERS, pkrs) | 579 AMD_FMT_MOD_SET(DCC, 1) | 580 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 581 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 582 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); 583 584 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 585 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 586 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 587 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 588 AMD_FMT_MOD_SET(PACKERS, pkrs) | 589 AMD_FMT_MOD_SET(DCC, 1) | 590 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 591 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 592 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 593 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 594 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 595 596 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 597 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 598 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 599 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 600 AMD_FMT_MOD_SET(PACKERS, pkrs) | 601 AMD_FMT_MOD_SET(DCC, 1) | 602 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 603 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 604 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 605 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); 606 607 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 608 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 609 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 610 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 611 AMD_FMT_MOD_SET(PACKERS, pkrs)); 612 613 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 614 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 615 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 616 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 617 AMD_FMT_MOD_SET(PACKERS, pkrs)); 618 619 /* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */ 620 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 621 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | 622 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 623 624 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 625 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | 626 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 627 } 628 629 static void amdgpu_dm_plane_add_gfx11_modifiers(struct amdgpu_device *adev, 630 uint64_t **mods, uint64_t *size, uint64_t *capacity) 631 { 632 int num_pipes = 0; 633 int pipe_xor_bits = 0; 634 int num_pkrs = 0; 635 int pkrs = 0; 636 u32 gb_addr_config; 637 u8 i = 0; 638 unsigned int swizzle_r_x; 639 uint64_t modifier_r_x; 640 uint64_t modifier_dcc_best; 641 uint64_t modifier_dcc_4k; 642 643 /* TODO: GFX11 IP HW init hasnt finish and we get zero if we read from 644 * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes} 645 */ 646 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG); 647 ASSERT(gb_addr_config != 0); 648 649 num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS); 650 pkrs = ilog2(num_pkrs); 651 num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES); 652 pipe_xor_bits = ilog2(num_pipes); 653 654 for (i = 0; i < 2; i++) { 655 /* Insert the best one first. */ 656 /* R_X swizzle modes are the best for rendering and DCC requires them. */ 657 if (num_pipes > 16) 658 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X; 659 else 660 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X; 661 662 modifier_r_x = AMD_FMT_MOD | 663 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) | 664 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 665 AMD_FMT_MOD_SET(TILE, swizzle_r_x) | 666 AMD_FMT_MOD_SET(PACKERS, pkrs); 667 668 /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */ 669 modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) | 670 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) | 671 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 672 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B); 673 674 /* DCC settings for 4K and greater resolutions. (required by display hw) */ 675 modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) | 676 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 677 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 678 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B); 679 680 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best); 681 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k); 682 683 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1)); 684 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1)); 685 686 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_r_x); 687 } 688 689 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 690 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) | 691 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D)); 692 } 693 694 static void amdgpu_dm_plane_add_gfx12_modifiers(struct amdgpu_device *adev, 695 uint64_t **mods, uint64_t *size, uint64_t *capacity) 696 { 697 uint64_t ver = AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX12); 698 uint64_t mod_256k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256K_2D); 699 uint64_t mod_64k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_64K_2D); 700 uint64_t mod_4k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_4K_2D); 701 uint64_t mod_256b = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256B_2D); 702 uint64_t dcc = ver | AMD_FMT_MOD_SET(DCC, 1); 703 uint8_t max_comp_block[] = {2, 1, 0}; 704 uint64_t max_comp_block_mod[ARRAY_SIZE(max_comp_block)] = {0}; 705 uint8_t i = 0, j = 0; 706 uint64_t gfx12_modifiers[] = {mod_256k, mod_64k, mod_4k, mod_256b, DRM_FORMAT_MOD_LINEAR}; 707 708 for (i = 0; i < ARRAY_SIZE(max_comp_block); i++) 709 max_comp_block_mod[i] = AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_comp_block[i]); 710 711 /* With DCC: Best choice should be kept first. Hence, add all 256k modifiers of different 712 * max compressed blocks first and then move on to the next smaller sized layouts. 713 * Do not add the linear modifier here, and hence the condition of size-1 for the loop 714 */ 715 for (j = 0; j < ARRAY_SIZE(gfx12_modifiers) - 1; j++) 716 for (i = 0; i < ARRAY_SIZE(max_comp_block); i++) 717 amdgpu_dm_plane_add_modifier(mods, size, capacity, 718 ver | dcc | max_comp_block_mod[i] | gfx12_modifiers[j]); 719 720 /* Without DCC. Add all modifiers including linear at the end */ 721 for (i = 0; i < ARRAY_SIZE(gfx12_modifiers); i++) 722 amdgpu_dm_plane_add_modifier(mods, size, capacity, gfx12_modifiers[i]); 723 724 } 725 726 static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods) 727 { 728 uint64_t size = 0, capacity = 128; 729 *mods = NULL; 730 731 /* We have not hooked up any pre-GFX9 modifiers. */ 732 if (adev->family < AMDGPU_FAMILY_AI) 733 return 0; 734 735 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL); 736 737 if (plane_type == DRM_PLANE_TYPE_CURSOR) { 738 amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); 739 amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); 740 return *mods ? 0 : -ENOMEM; 741 } 742 743 switch (adev->family) { 744 case AMDGPU_FAMILY_AI: 745 case AMDGPU_FAMILY_RV: 746 amdgpu_dm_plane_add_gfx9_modifiers(adev, mods, &size, &capacity); 747 break; 748 case AMDGPU_FAMILY_NV: 749 case AMDGPU_FAMILY_VGH: 750 case AMDGPU_FAMILY_YC: 751 case AMDGPU_FAMILY_GC_10_3_6: 752 case AMDGPU_FAMILY_GC_10_3_7: 753 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) 754 amdgpu_dm_plane_add_gfx10_3_modifiers(adev, mods, &size, &capacity); 755 else 756 amdgpu_dm_plane_add_gfx10_1_modifiers(adev, mods, &size, &capacity); 757 break; 758 case AMDGPU_FAMILY_GC_11_0_0: 759 case AMDGPU_FAMILY_GC_11_0_1: 760 case AMDGPU_FAMILY_GC_11_5_0: 761 amdgpu_dm_plane_add_gfx11_modifiers(adev, mods, &size, &capacity); 762 break; 763 case AMDGPU_FAMILY_GC_12_0_0: 764 amdgpu_dm_plane_add_gfx12_modifiers(adev, mods, &size, &capacity); 765 break; 766 } 767 768 amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); 769 770 /* INVALID marks the end of the list. */ 771 amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); 772 773 if (!*mods) 774 return -ENOMEM; 775 776 return 0; 777 } 778 779 static int amdgpu_dm_plane_get_plane_formats(const struct drm_plane *plane, 780 const struct dc_plane_cap *plane_cap, 781 uint32_t *formats, int max_formats) 782 { 783 int i, num_formats = 0; 784 785 /* 786 * TODO: Query support for each group of formats directly from 787 * DC plane caps. This will require adding more formats to the 788 * caps list. 789 */ 790 791 if (plane->type == DRM_PLANE_TYPE_PRIMARY || 792 (plane_cap && plane_cap->type == DC_PLANE_TYPE_DCN_UNIVERSAL && plane->type != DRM_PLANE_TYPE_CURSOR)) { 793 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) { 794 if (num_formats >= max_formats) 795 break; 796 797 formats[num_formats++] = rgb_formats[i]; 798 } 799 800 if (plane_cap && plane_cap->pixel_format_support.nv12) 801 formats[num_formats++] = DRM_FORMAT_NV12; 802 if (plane_cap && plane_cap->pixel_format_support.p010) 803 formats[num_formats++] = DRM_FORMAT_P010; 804 if (plane_cap && plane_cap->pixel_format_support.fp16) { 805 formats[num_formats++] = DRM_FORMAT_XRGB16161616F; 806 formats[num_formats++] = DRM_FORMAT_ARGB16161616F; 807 formats[num_formats++] = DRM_FORMAT_XBGR16161616F; 808 formats[num_formats++] = DRM_FORMAT_ABGR16161616F; 809 } 810 } else { 811 switch (plane->type) { 812 case DRM_PLANE_TYPE_OVERLAY: 813 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) { 814 if (num_formats >= max_formats) 815 break; 816 817 formats[num_formats++] = overlay_formats[i]; 818 } 819 break; 820 821 case DRM_PLANE_TYPE_CURSOR: 822 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) { 823 if (num_formats >= max_formats) 824 break; 825 826 formats[num_formats++] = cursor_formats[i]; 827 } 828 break; 829 830 default: 831 break; 832 } 833 } 834 835 return num_formats; 836 } 837 838 int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev, 839 const struct amdgpu_framebuffer *afb, 840 const enum surface_pixel_format format, 841 const enum dc_rotation_angle rotation, 842 const uint64_t tiling_flags, 843 struct dc_tiling_info *tiling_info, 844 struct plane_size *plane_size, 845 struct dc_plane_dcc_param *dcc, 846 struct dc_plane_address *address, 847 bool tmz_surface) 848 { 849 const struct drm_framebuffer *fb = &afb->base; 850 int ret; 851 852 memset(tiling_info, 0, sizeof(*tiling_info)); 853 memset(plane_size, 0, sizeof(*plane_size)); 854 memset(dcc, 0, sizeof(*dcc)); 855 memset(address, 0, sizeof(*address)); 856 857 address->tmz_surface = tmz_surface; 858 859 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { 860 uint64_t addr = afb->address + fb->offsets[0]; 861 862 plane_size->surface_size.x = 0; 863 plane_size->surface_size.y = 0; 864 plane_size->surface_size.width = fb->width; 865 plane_size->surface_size.height = fb->height; 866 plane_size->surface_pitch = 867 fb->pitches[0] / fb->format->cpp[0]; 868 869 address->type = PLN_ADDR_TYPE_GRAPHICS; 870 address->grph.addr.low_part = lower_32_bits(addr); 871 address->grph.addr.high_part = upper_32_bits(addr); 872 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) { 873 uint64_t luma_addr = afb->address + fb->offsets[0]; 874 uint64_t chroma_addr = afb->address + fb->offsets[1]; 875 876 plane_size->surface_size.x = 0; 877 plane_size->surface_size.y = 0; 878 plane_size->surface_size.width = fb->width; 879 plane_size->surface_size.height = fb->height; 880 plane_size->surface_pitch = 881 fb->pitches[0] / fb->format->cpp[0]; 882 883 plane_size->chroma_size.x = 0; 884 plane_size->chroma_size.y = 0; 885 /* TODO: set these based on surface format */ 886 plane_size->chroma_size.width = fb->width / 2; 887 plane_size->chroma_size.height = fb->height / 2; 888 889 plane_size->chroma_pitch = 890 fb->pitches[1] / fb->format->cpp[1]; 891 892 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE; 893 address->video_progressive.luma_addr.low_part = 894 lower_32_bits(luma_addr); 895 address->video_progressive.luma_addr.high_part = 896 upper_32_bits(luma_addr); 897 address->video_progressive.chroma_addr.low_part = 898 lower_32_bits(chroma_addr); 899 address->video_progressive.chroma_addr.high_part = 900 upper_32_bits(chroma_addr); 901 } 902 903 if (adev->family >= AMDGPU_FAMILY_GC_12_0_0) { 904 ret = amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(adev, afb, format, 905 rotation, plane_size, 906 tiling_info, dcc, 907 address); 908 if (ret) 909 return ret; 910 } else if (adev->family >= AMDGPU_FAMILY_AI) { 911 ret = amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(adev, afb, format, 912 rotation, plane_size, 913 tiling_info, dcc, 914 address); 915 if (ret) 916 return ret; 917 } else { 918 amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags); 919 } 920 921 return 0; 922 } 923 924 static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane, 925 struct drm_plane_state *new_state) 926 { 927 struct amdgpu_framebuffer *afb; 928 struct drm_gem_object *obj; 929 struct amdgpu_device *adev; 930 struct amdgpu_bo *rbo; 931 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old; 932 uint32_t domain; 933 int r; 934 935 if (!new_state->fb) { 936 DRM_DEBUG_KMS("No FB bound\n"); 937 return 0; 938 } 939 940 afb = to_amdgpu_framebuffer(new_state->fb); 941 obj = drm_gem_fb_get_obj(new_state->fb, 0); 942 if (!obj) { 943 DRM_ERROR("Failed to get obj from framebuffer\n"); 944 return -EINVAL; 945 } 946 947 rbo = gem_to_amdgpu_bo(obj); 948 adev = amdgpu_ttm_adev(rbo->tbo.bdev); 949 r = amdgpu_bo_reserve(rbo, true); 950 if (r) { 951 dev_err(adev->dev, "fail to reserve bo (%d)\n", r); 952 return r; 953 } 954 955 r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1); 956 if (r) { 957 dev_err(adev->dev, "reserving fence slot failed (%d)\n", r); 958 goto error_unlock; 959 } 960 961 if (plane->type != DRM_PLANE_TYPE_CURSOR) 962 domain = amdgpu_display_supported_domains(adev, rbo->flags); 963 else 964 domain = AMDGPU_GEM_DOMAIN_VRAM; 965 966 rbo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; 967 r = amdgpu_bo_pin(rbo, domain); 968 if (unlikely(r != 0)) { 969 if (r != -ERESTARTSYS) 970 DRM_ERROR("Failed to pin framebuffer with error %d\n", r); 971 goto error_unlock; 972 } 973 974 r = amdgpu_ttm_alloc_gart(&rbo->tbo); 975 if (unlikely(r != 0)) { 976 DRM_ERROR("%p bind failed\n", rbo); 977 goto error_unpin; 978 } 979 980 r = drm_gem_plane_helper_prepare_fb(plane, new_state); 981 if (unlikely(r != 0)) 982 goto error_unpin; 983 984 amdgpu_bo_unreserve(rbo); 985 986 afb->address = amdgpu_bo_gpu_offset(rbo); 987 988 amdgpu_bo_ref(rbo); 989 990 /** 991 * We don't do surface updates on planes that have been newly created, 992 * but we also don't have the afb->address during atomic check. 993 * 994 * Fill in buffer attributes depending on the address here, but only on 995 * newly created planes since they're not being used by DC yet and this 996 * won't modify global state. 997 */ 998 dm_plane_state_old = to_dm_plane_state(plane->state); 999 dm_plane_state_new = to_dm_plane_state(new_state); 1000 1001 if (dm_plane_state_new->dc_state && 1002 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { 1003 struct dc_plane_state *plane_state = 1004 dm_plane_state_new->dc_state; 1005 1006 amdgpu_dm_plane_fill_plane_buffer_attributes( 1007 adev, afb, plane_state->format, plane_state->rotation, 1008 afb->tiling_flags, 1009 &plane_state->tiling_info, &plane_state->plane_size, 1010 &plane_state->dcc, &plane_state->address, 1011 afb->tmz_surface); 1012 } 1013 1014 return 0; 1015 1016 error_unpin: 1017 amdgpu_bo_unpin(rbo); 1018 1019 error_unlock: 1020 amdgpu_bo_unreserve(rbo); 1021 return r; 1022 } 1023 1024 static void amdgpu_dm_plane_helper_cleanup_fb(struct drm_plane *plane, 1025 struct drm_plane_state *old_state) 1026 { 1027 struct amdgpu_bo *rbo; 1028 int r; 1029 1030 if (!old_state->fb) 1031 return; 1032 1033 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]); 1034 r = amdgpu_bo_reserve(rbo, false); 1035 if (unlikely(r)) { 1036 DRM_ERROR("failed to reserve rbo before unpin\n"); 1037 return; 1038 } 1039 1040 amdgpu_bo_unpin(rbo); 1041 amdgpu_bo_unreserve(rbo); 1042 amdgpu_bo_unref(&rbo); 1043 } 1044 1045 static void amdgpu_dm_plane_get_min_max_dc_plane_scaling(struct drm_device *dev, 1046 struct drm_framebuffer *fb, 1047 int *min_downscale, int *max_upscale) 1048 { 1049 struct amdgpu_device *adev = drm_to_adev(dev); 1050 struct dc *dc = adev->dm.dc; 1051 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */ 1052 struct dc_plane_cap *plane_cap = &dc->caps.planes[0]; 1053 1054 switch (fb->format->format) { 1055 case DRM_FORMAT_P010: 1056 case DRM_FORMAT_NV12: 1057 case DRM_FORMAT_NV21: 1058 *max_upscale = plane_cap->max_upscale_factor.nv12; 1059 *min_downscale = plane_cap->max_downscale_factor.nv12; 1060 break; 1061 1062 case DRM_FORMAT_XRGB16161616F: 1063 case DRM_FORMAT_ARGB16161616F: 1064 case DRM_FORMAT_XBGR16161616F: 1065 case DRM_FORMAT_ABGR16161616F: 1066 *max_upscale = plane_cap->max_upscale_factor.fp16; 1067 *min_downscale = plane_cap->max_downscale_factor.fp16; 1068 break; 1069 1070 default: 1071 *max_upscale = plane_cap->max_upscale_factor.argb8888; 1072 *min_downscale = plane_cap->max_downscale_factor.argb8888; 1073 break; 1074 } 1075 1076 /* 1077 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a 1078 * scaling factor of 1.0 == 1000 units. 1079 */ 1080 if (*max_upscale == 1) 1081 *max_upscale = 1000; 1082 1083 if (*min_downscale == 1) 1084 *min_downscale = 1000; 1085 } 1086 1087 int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state, 1088 struct drm_crtc_state *new_crtc_state) 1089 { 1090 struct drm_framebuffer *fb = state->fb; 1091 int min_downscale, max_upscale; 1092 int min_scale = 0; 1093 int max_scale = INT_MAX; 1094 1095 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */ 1096 if (fb && state->crtc) { 1097 /* Validate viewport to cover the case when only the position changes */ 1098 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) { 1099 int viewport_width = state->crtc_w; 1100 int viewport_height = state->crtc_h; 1101 1102 if (state->crtc_x < 0) 1103 viewport_width += state->crtc_x; 1104 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay) 1105 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x; 1106 1107 if (state->crtc_y < 0) 1108 viewport_height += state->crtc_y; 1109 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay) 1110 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y; 1111 1112 if (viewport_width < 0 || viewport_height < 0) { 1113 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n"); 1114 return -EINVAL; 1115 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */ 1116 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2); 1117 return -EINVAL; 1118 } else if (viewport_height < MIN_VIEWPORT_SIZE) { 1119 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE); 1120 return -EINVAL; 1121 } 1122 1123 } 1124 1125 /* Get min/max allowed scaling factors from plane caps. */ 1126 amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->crtc->dev, fb, 1127 &min_downscale, &max_upscale); 1128 /* 1129 * Convert to drm convention: 16.16 fixed point, instead of dc's 1130 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's 1131 * dst/src, so min_scale = 1.0 / max_upscale, etc. 1132 */ 1133 min_scale = (1000 << 16) / max_upscale; 1134 max_scale = (1000 << 16) / min_downscale; 1135 } 1136 1137 return drm_atomic_helper_check_plane_state( 1138 state, new_crtc_state, min_scale, max_scale, true, true); 1139 } 1140 1141 int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev, 1142 const struct drm_plane_state *state, 1143 struct dc_scaling_info *scaling_info) 1144 { 1145 int scale_w, scale_h, min_downscale, max_upscale; 1146 1147 memset(scaling_info, 0, sizeof(*scaling_info)); 1148 1149 /* Source is fixed 16.16 but we ignore mantissa for now... */ 1150 scaling_info->src_rect.x = state->src_x >> 16; 1151 scaling_info->src_rect.y = state->src_y >> 16; 1152 1153 /* 1154 * For reasons we don't (yet) fully understand a non-zero 1155 * src_y coordinate into an NV12 buffer can cause a 1156 * system hang on DCN1x. 1157 * To avoid hangs (and maybe be overly cautious) 1158 * let's reject both non-zero src_x and src_y. 1159 * 1160 * We currently know of only one use-case to reproduce a 1161 * scenario with non-zero src_x and src_y for NV12, which 1162 * is to gesture the YouTube Android app into full screen 1163 * on ChromeOS. 1164 */ 1165 if (((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) || 1166 (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) && 1167 (state->fb && state->fb->format->format == DRM_FORMAT_NV12 && 1168 (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0))) 1169 return -EINVAL; 1170 1171 scaling_info->src_rect.width = state->src_w >> 16; 1172 if (scaling_info->src_rect.width == 0) 1173 return -EINVAL; 1174 1175 scaling_info->src_rect.height = state->src_h >> 16; 1176 if (scaling_info->src_rect.height == 0) 1177 return -EINVAL; 1178 1179 scaling_info->dst_rect.x = state->crtc_x; 1180 scaling_info->dst_rect.y = state->crtc_y; 1181 1182 if (state->crtc_w == 0) 1183 return -EINVAL; 1184 1185 scaling_info->dst_rect.width = state->crtc_w; 1186 1187 if (state->crtc_h == 0) 1188 return -EINVAL; 1189 1190 scaling_info->dst_rect.height = state->crtc_h; 1191 1192 /* DRM doesn't specify clipping on destination output. */ 1193 scaling_info->clip_rect = scaling_info->dst_rect; 1194 1195 /* Validate scaling per-format with DC plane caps */ 1196 if (state->plane && state->plane->dev && state->fb) { 1197 amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->plane->dev, state->fb, 1198 &min_downscale, &max_upscale); 1199 } else { 1200 min_downscale = 250; 1201 max_upscale = 16000; 1202 } 1203 1204 scale_w = scaling_info->dst_rect.width * 1000 / 1205 scaling_info->src_rect.width; 1206 1207 if (scale_w < min_downscale || scale_w > max_upscale) 1208 return -EINVAL; 1209 1210 scale_h = scaling_info->dst_rect.height * 1000 / 1211 scaling_info->src_rect.height; 1212 1213 if (scale_h < min_downscale || scale_h > max_upscale) 1214 return -EINVAL; 1215 1216 /* 1217 * The "scaling_quality" can be ignored for now, quality = 0 has DC 1218 * assume reasonable defaults based on the format. 1219 */ 1220 1221 return 0; 1222 } 1223 1224 static int amdgpu_dm_plane_atomic_check(struct drm_plane *plane, 1225 struct drm_atomic_state *state) 1226 { 1227 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, 1228 plane); 1229 struct amdgpu_device *adev = drm_to_adev(plane->dev); 1230 struct dc *dc = adev->dm.dc; 1231 struct dm_plane_state *dm_plane_state; 1232 struct dc_scaling_info scaling_info; 1233 struct drm_crtc_state *new_crtc_state; 1234 int ret; 1235 1236 trace_amdgpu_dm_plane_atomic_check(new_plane_state); 1237 1238 dm_plane_state = to_dm_plane_state(new_plane_state); 1239 1240 if (!dm_plane_state->dc_state) 1241 return 0; 1242 1243 new_crtc_state = 1244 drm_atomic_get_new_crtc_state(state, 1245 new_plane_state->crtc); 1246 if (!new_crtc_state) 1247 return -EINVAL; 1248 1249 ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state); 1250 if (ret) 1251 return ret; 1252 1253 ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, new_plane_state, &scaling_info); 1254 if (ret) 1255 return ret; 1256 1257 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK) 1258 return 0; 1259 1260 return -EINVAL; 1261 } 1262 1263 static int amdgpu_dm_plane_atomic_async_check(struct drm_plane *plane, 1264 struct drm_atomic_state *state, bool flip) 1265 { 1266 struct drm_crtc_state *new_crtc_state; 1267 struct drm_plane_state *new_plane_state; 1268 struct dm_crtc_state *dm_new_crtc_state; 1269 1270 if (flip) { 1271 if (plane->type != DRM_PLANE_TYPE_OVERLAY) 1272 return -EINVAL; 1273 } else if (plane->type != DRM_PLANE_TYPE_CURSOR) { 1274 return -EINVAL; 1275 } 1276 1277 new_plane_state = drm_atomic_get_new_plane_state(state, plane); 1278 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc); 1279 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 1280 /* Reject overlay cursors for now*/ 1281 if (!flip && dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE) 1282 return -EINVAL; 1283 1284 return 0; 1285 } 1286 1287 int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, 1288 struct dc_cursor_position *position) 1289 { 1290 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1291 struct amdgpu_device *adev = drm_to_adev(plane->dev); 1292 int x, y; 1293 int xorigin = 0, yorigin = 0; 1294 1295 if (!crtc || !plane->state->fb) 1296 return 0; 1297 1298 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) || 1299 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) { 1300 DRM_ERROR("%s: bad cursor width or height %d x %d\n", 1301 __func__, 1302 plane->state->crtc_w, 1303 plane->state->crtc_h); 1304 return -EINVAL; 1305 } 1306 1307 x = plane->state->crtc_x; 1308 y = plane->state->crtc_y; 1309 1310 if (x <= -amdgpu_crtc->max_cursor_width || 1311 y <= -amdgpu_crtc->max_cursor_height) 1312 return 0; 1313 1314 if (x < 0) { 1315 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); 1316 x = 0; 1317 } 1318 if (y < 0) { 1319 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); 1320 y = 0; 1321 } 1322 position->enable = true; 1323 position->x = x; 1324 position->y = y; 1325 position->x_hotspot = xorigin; 1326 position->y_hotspot = yorigin; 1327 1328 if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(4, 0, 1)) 1329 position->translate_by_source = true; 1330 1331 return 0; 1332 } 1333 1334 void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane, 1335 struct drm_plane_state *old_plane_state) 1336 { 1337 struct amdgpu_device *adev = drm_to_adev(plane->dev); 1338 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb); 1339 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc; 1340 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL; 1341 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1342 uint64_t address = afb ? afb->address : 0; 1343 struct dc_cursor_position position = {0}; 1344 struct dc_cursor_attributes attributes; 1345 int ret; 1346 1347 if (!plane->state->fb && !old_plane_state->fb) 1348 return; 1349 1350 drm_dbg_atomic(plane->dev, "crtc_id=%d with size %d to %d\n", 1351 amdgpu_crtc->crtc_id, plane->state->crtc_w, 1352 plane->state->crtc_h); 1353 1354 ret = amdgpu_dm_plane_get_cursor_position(plane, crtc, &position); 1355 if (ret) 1356 return; 1357 1358 if (!position.enable) { 1359 /* turn off cursor */ 1360 if (crtc_state && crtc_state->stream) { 1361 mutex_lock(&adev->dm.dc_lock); 1362 dc_stream_program_cursor_position(crtc_state->stream, 1363 &position); 1364 mutex_unlock(&adev->dm.dc_lock); 1365 } 1366 return; 1367 } 1368 1369 amdgpu_crtc->cursor_width = plane->state->crtc_w; 1370 amdgpu_crtc->cursor_height = plane->state->crtc_h; 1371 1372 memset(&attributes, 0, sizeof(attributes)); 1373 attributes.address.high_part = upper_32_bits(address); 1374 attributes.address.low_part = lower_32_bits(address); 1375 attributes.width = plane->state->crtc_w; 1376 attributes.height = plane->state->crtc_h; 1377 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA; 1378 attributes.rotation_angle = 0; 1379 attributes.attribute_flags.value = 0; 1380 1381 /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM 1382 * legacy gamma setup. 1383 */ 1384 if (crtc_state->cm_is_degamma_srgb && 1385 adev->dm.dc->caps.color.dpp.gamma_corr) 1386 attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1; 1387 1388 if (afb) 1389 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; 1390 1391 if (crtc_state->stream) { 1392 mutex_lock(&adev->dm.dc_lock); 1393 if (!dc_stream_program_cursor_attributes(crtc_state->stream, 1394 &attributes)) 1395 DRM_ERROR("DC failed to set cursor attributes\n"); 1396 1397 if (!dc_stream_program_cursor_position(crtc_state->stream, 1398 &position)) 1399 DRM_ERROR("DC failed to set cursor position\n"); 1400 mutex_unlock(&adev->dm.dc_lock); 1401 } 1402 } 1403 1404 static void amdgpu_dm_plane_atomic_async_update(struct drm_plane *plane, 1405 struct drm_atomic_state *state) 1406 { 1407 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, 1408 plane); 1409 struct drm_plane_state *old_state = 1410 drm_atomic_get_old_plane_state(state, plane); 1411 1412 trace_amdgpu_dm_atomic_update_cursor(new_state); 1413 1414 swap(plane->state->fb, new_state->fb); 1415 1416 plane->state->src_x = new_state->src_x; 1417 plane->state->src_y = new_state->src_y; 1418 plane->state->src_w = new_state->src_w; 1419 plane->state->src_h = new_state->src_h; 1420 plane->state->crtc_x = new_state->crtc_x; 1421 plane->state->crtc_y = new_state->crtc_y; 1422 plane->state->crtc_w = new_state->crtc_w; 1423 plane->state->crtc_h = new_state->crtc_h; 1424 1425 amdgpu_dm_plane_handle_cursor_update(plane, old_state); 1426 } 1427 1428 static void amdgpu_dm_plane_panic_flush(struct drm_plane *plane) 1429 { 1430 struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane->state); 1431 struct drm_framebuffer *fb = plane->state->fb; 1432 struct dc_plane_state *dc_plane_state; 1433 1434 if (!dm_plane_state || !dm_plane_state->dc_state) 1435 return; 1436 1437 dc_plane_state = dm_plane_state->dc_state; 1438 1439 dc_plane_force_dcc_and_tiling_disable(dc_plane_state, fb->modifier ? true : false); 1440 } 1441 1442 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { 1443 .prepare_fb = amdgpu_dm_plane_helper_prepare_fb, 1444 .cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb, 1445 .atomic_check = amdgpu_dm_plane_atomic_check, 1446 .atomic_async_check = amdgpu_dm_plane_atomic_async_check, 1447 .atomic_async_update = amdgpu_dm_plane_atomic_async_update 1448 }; 1449 1450 static const struct drm_plane_helper_funcs dm_primary_plane_helper_funcs = { 1451 .prepare_fb = amdgpu_dm_plane_helper_prepare_fb, 1452 .cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb, 1453 .atomic_check = amdgpu_dm_plane_atomic_check, 1454 .atomic_async_check = amdgpu_dm_plane_atomic_async_check, 1455 .atomic_async_update = amdgpu_dm_plane_atomic_async_update, 1456 .get_scanout_buffer = amdgpu_display_get_scanout_buffer, 1457 .panic_flush = amdgpu_dm_plane_panic_flush, 1458 }; 1459 1460 static void amdgpu_dm_plane_drm_plane_reset(struct drm_plane *plane) 1461 { 1462 struct dm_plane_state *amdgpu_state = NULL; 1463 1464 if (plane->state) 1465 plane->funcs->atomic_destroy_state(plane, plane->state); 1466 1467 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL); 1468 WARN_ON(amdgpu_state == NULL); 1469 1470 if (!amdgpu_state) 1471 return; 1472 1473 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base); 1474 amdgpu_state->degamma_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; 1475 amdgpu_state->hdr_mult = AMDGPU_HDR_MULT_DEFAULT; 1476 amdgpu_state->shaper_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; 1477 amdgpu_state->blend_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; 1478 } 1479 1480 static struct drm_plane_state *amdgpu_dm_plane_drm_plane_duplicate_state(struct drm_plane *plane) 1481 { 1482 struct dm_plane_state *dm_plane_state, *old_dm_plane_state; 1483 1484 old_dm_plane_state = to_dm_plane_state(plane->state); 1485 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL); 1486 if (!dm_plane_state) 1487 return NULL; 1488 1489 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base); 1490 1491 if (old_dm_plane_state->dc_state) { 1492 dm_plane_state->dc_state = old_dm_plane_state->dc_state; 1493 dc_plane_state_retain(dm_plane_state->dc_state); 1494 } 1495 1496 if (old_dm_plane_state->degamma_lut) 1497 dm_plane_state->degamma_lut = 1498 drm_property_blob_get(old_dm_plane_state->degamma_lut); 1499 if (old_dm_plane_state->ctm) 1500 dm_plane_state->ctm = 1501 drm_property_blob_get(old_dm_plane_state->ctm); 1502 if (old_dm_plane_state->shaper_lut) 1503 dm_plane_state->shaper_lut = 1504 drm_property_blob_get(old_dm_plane_state->shaper_lut); 1505 if (old_dm_plane_state->lut3d) 1506 dm_plane_state->lut3d = 1507 drm_property_blob_get(old_dm_plane_state->lut3d); 1508 if (old_dm_plane_state->blend_lut) 1509 dm_plane_state->blend_lut = 1510 drm_property_blob_get(old_dm_plane_state->blend_lut); 1511 1512 dm_plane_state->degamma_tf = old_dm_plane_state->degamma_tf; 1513 dm_plane_state->hdr_mult = old_dm_plane_state->hdr_mult; 1514 dm_plane_state->shaper_tf = old_dm_plane_state->shaper_tf; 1515 dm_plane_state->blend_tf = old_dm_plane_state->blend_tf; 1516 1517 return &dm_plane_state->base; 1518 } 1519 1520 static bool amdgpu_dm_plane_format_mod_supported(struct drm_plane *plane, 1521 uint32_t format, 1522 uint64_t modifier) 1523 { 1524 struct amdgpu_device *adev = drm_to_adev(plane->dev); 1525 const struct drm_format_info *info = drm_format_info(format); 1526 int i; 1527 1528 if (!info) 1529 return false; 1530 1531 /* 1532 * We always have to allow these modifiers: 1533 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers. 1534 * 2. Not passing any modifiers is the same as explicitly passing INVALID. 1535 */ 1536 if (modifier == DRM_FORMAT_MOD_LINEAR || 1537 modifier == DRM_FORMAT_MOD_INVALID) { 1538 return true; 1539 } 1540 1541 /* Check that the modifier is on the list of the plane's supported modifiers. */ 1542 for (i = 0; i < plane->modifier_count; i++) { 1543 if (modifier == plane->modifiers[i]) 1544 break; 1545 } 1546 if (i == plane->modifier_count) 1547 return false; 1548 1549 /* GFX12 doesn't have these limitations. */ 1550 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) <= AMD_FMT_MOD_TILE_VER_GFX11) { 1551 enum dm_micro_swizzle microtile = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier) & 3; 1552 1553 /* 1554 * For D swizzle the canonical modifier depends on the bpp, so check 1555 * it here. 1556 */ 1557 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 && 1558 adev->family >= AMDGPU_FAMILY_NV) { 1559 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4) 1560 return false; 1561 } 1562 1563 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D && 1564 info->cpp[0] < 8) 1565 return false; 1566 1567 if (amdgpu_dm_plane_modifier_has_dcc(modifier)) { 1568 /* Per radeonsi comments 16/64 bpp are more complicated. */ 1569 if (info->cpp[0] != 4) 1570 return false; 1571 /* We support multi-planar formats, but not when combined with 1572 * additional DCC metadata planes. 1573 */ 1574 if (info->num_planes > 1) 1575 return false; 1576 } 1577 } 1578 1579 return true; 1580 } 1581 1582 static void amdgpu_dm_plane_drm_plane_destroy_state(struct drm_plane *plane, 1583 struct drm_plane_state *state) 1584 { 1585 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); 1586 1587 if (dm_plane_state->degamma_lut) 1588 drm_property_blob_put(dm_plane_state->degamma_lut); 1589 if (dm_plane_state->ctm) 1590 drm_property_blob_put(dm_plane_state->ctm); 1591 if (dm_plane_state->lut3d) 1592 drm_property_blob_put(dm_plane_state->lut3d); 1593 if (dm_plane_state->shaper_lut) 1594 drm_property_blob_put(dm_plane_state->shaper_lut); 1595 if (dm_plane_state->blend_lut) 1596 drm_property_blob_put(dm_plane_state->blend_lut); 1597 1598 if (dm_plane_state->dc_state) 1599 dc_plane_state_release(dm_plane_state->dc_state); 1600 1601 drm_atomic_helper_plane_destroy_state(plane, state); 1602 } 1603 1604 #ifdef AMD_PRIVATE_COLOR 1605 static void 1606 dm_atomic_plane_attach_color_mgmt_properties(struct amdgpu_display_manager *dm, 1607 struct drm_plane *plane) 1608 { 1609 struct amdgpu_mode_info mode_info = dm->adev->mode_info; 1610 struct dpp_color_caps dpp_color_caps = dm->dc->caps.color.dpp; 1611 1612 /* Check HW color pipeline capabilities on DPP block (pre-blending) 1613 * before exposing related properties. 1614 */ 1615 if (dpp_color_caps.dgam_ram || dpp_color_caps.gamma_corr) { 1616 drm_object_attach_property(&plane->base, 1617 mode_info.plane_degamma_lut_property, 1618 0); 1619 drm_object_attach_property(&plane->base, 1620 mode_info.plane_degamma_lut_size_property, 1621 MAX_COLOR_LUT_ENTRIES); 1622 drm_object_attach_property(&plane->base, 1623 dm->adev->mode_info.plane_degamma_tf_property, 1624 AMDGPU_TRANSFER_FUNCTION_DEFAULT); 1625 } 1626 /* HDR MULT is always available */ 1627 drm_object_attach_property(&plane->base, 1628 dm->adev->mode_info.plane_hdr_mult_property, 1629 AMDGPU_HDR_MULT_DEFAULT); 1630 1631 /* Only enable plane CTM if both DPP and MPC gamut remap is available. */ 1632 if (dm->dc->caps.color.mpc.gamut_remap) 1633 drm_object_attach_property(&plane->base, 1634 dm->adev->mode_info.plane_ctm_property, 0); 1635 1636 if (dpp_color_caps.hw_3d_lut) { 1637 drm_object_attach_property(&plane->base, 1638 mode_info.plane_shaper_lut_property, 0); 1639 drm_object_attach_property(&plane->base, 1640 mode_info.plane_shaper_lut_size_property, 1641 MAX_COLOR_LUT_ENTRIES); 1642 drm_object_attach_property(&plane->base, 1643 mode_info.plane_shaper_tf_property, 1644 AMDGPU_TRANSFER_FUNCTION_DEFAULT); 1645 drm_object_attach_property(&plane->base, 1646 mode_info.plane_lut3d_property, 0); 1647 drm_object_attach_property(&plane->base, 1648 mode_info.plane_lut3d_size_property, 1649 MAX_COLOR_3DLUT_SIZE); 1650 } 1651 1652 if (dpp_color_caps.ogam_ram) { 1653 drm_object_attach_property(&plane->base, 1654 mode_info.plane_blend_lut_property, 0); 1655 drm_object_attach_property(&plane->base, 1656 mode_info.plane_blend_lut_size_property, 1657 MAX_COLOR_LUT_ENTRIES); 1658 drm_object_attach_property(&plane->base, 1659 mode_info.plane_blend_tf_property, 1660 AMDGPU_TRANSFER_FUNCTION_DEFAULT); 1661 } 1662 } 1663 1664 static int 1665 dm_atomic_plane_set_property(struct drm_plane *plane, 1666 struct drm_plane_state *state, 1667 struct drm_property *property, 1668 uint64_t val) 1669 { 1670 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); 1671 struct amdgpu_device *adev = drm_to_adev(plane->dev); 1672 bool replaced = false; 1673 int ret; 1674 1675 if (property == adev->mode_info.plane_degamma_lut_property) { 1676 ret = drm_property_replace_blob_from_id(plane->dev, 1677 &dm_plane_state->degamma_lut, 1678 val, -1, 1679 sizeof(struct drm_color_lut), 1680 &replaced); 1681 dm_plane_state->base.color_mgmt_changed |= replaced; 1682 return ret; 1683 } else if (property == adev->mode_info.plane_degamma_tf_property) { 1684 if (dm_plane_state->degamma_tf != val) { 1685 dm_plane_state->degamma_tf = val; 1686 dm_plane_state->base.color_mgmt_changed = 1; 1687 } 1688 } else if (property == adev->mode_info.plane_hdr_mult_property) { 1689 if (dm_plane_state->hdr_mult != val) { 1690 dm_plane_state->hdr_mult = val; 1691 dm_plane_state->base.color_mgmt_changed = 1; 1692 } 1693 } else if (property == adev->mode_info.plane_ctm_property) { 1694 ret = drm_property_replace_blob_from_id(plane->dev, 1695 &dm_plane_state->ctm, 1696 val, 1697 sizeof(struct drm_color_ctm_3x4), -1, 1698 &replaced); 1699 dm_plane_state->base.color_mgmt_changed |= replaced; 1700 return ret; 1701 } else if (property == adev->mode_info.plane_shaper_lut_property) { 1702 ret = drm_property_replace_blob_from_id(plane->dev, 1703 &dm_plane_state->shaper_lut, 1704 val, -1, 1705 sizeof(struct drm_color_lut), 1706 &replaced); 1707 dm_plane_state->base.color_mgmt_changed |= replaced; 1708 return ret; 1709 } else if (property == adev->mode_info.plane_shaper_tf_property) { 1710 if (dm_plane_state->shaper_tf != val) { 1711 dm_plane_state->shaper_tf = val; 1712 dm_plane_state->base.color_mgmt_changed = 1; 1713 } 1714 } else if (property == adev->mode_info.plane_lut3d_property) { 1715 ret = drm_property_replace_blob_from_id(plane->dev, 1716 &dm_plane_state->lut3d, 1717 val, -1, 1718 sizeof(struct drm_color_lut), 1719 &replaced); 1720 dm_plane_state->base.color_mgmt_changed |= replaced; 1721 return ret; 1722 } else if (property == adev->mode_info.plane_blend_lut_property) { 1723 ret = drm_property_replace_blob_from_id(plane->dev, 1724 &dm_plane_state->blend_lut, 1725 val, -1, 1726 sizeof(struct drm_color_lut), 1727 &replaced); 1728 dm_plane_state->base.color_mgmt_changed |= replaced; 1729 return ret; 1730 } else if (property == adev->mode_info.plane_blend_tf_property) { 1731 if (dm_plane_state->blend_tf != val) { 1732 dm_plane_state->blend_tf = val; 1733 dm_plane_state->base.color_mgmt_changed = 1; 1734 } 1735 } else { 1736 drm_dbg_atomic(plane->dev, 1737 "[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n", 1738 plane->base.id, plane->name, 1739 property->base.id, property->name); 1740 return -EINVAL; 1741 } 1742 1743 return 0; 1744 } 1745 1746 static int 1747 dm_atomic_plane_get_property(struct drm_plane *plane, 1748 const struct drm_plane_state *state, 1749 struct drm_property *property, 1750 uint64_t *val) 1751 { 1752 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); 1753 struct amdgpu_device *adev = drm_to_adev(plane->dev); 1754 1755 if (property == adev->mode_info.plane_degamma_lut_property) { 1756 *val = (dm_plane_state->degamma_lut) ? 1757 dm_plane_state->degamma_lut->base.id : 0; 1758 } else if (property == adev->mode_info.plane_degamma_tf_property) { 1759 *val = dm_plane_state->degamma_tf; 1760 } else if (property == adev->mode_info.plane_hdr_mult_property) { 1761 *val = dm_plane_state->hdr_mult; 1762 } else if (property == adev->mode_info.plane_ctm_property) { 1763 *val = (dm_plane_state->ctm) ? 1764 dm_plane_state->ctm->base.id : 0; 1765 } else if (property == adev->mode_info.plane_shaper_lut_property) { 1766 *val = (dm_plane_state->shaper_lut) ? 1767 dm_plane_state->shaper_lut->base.id : 0; 1768 } else if (property == adev->mode_info.plane_shaper_tf_property) { 1769 *val = dm_plane_state->shaper_tf; 1770 } else if (property == adev->mode_info.plane_lut3d_property) { 1771 *val = (dm_plane_state->lut3d) ? 1772 dm_plane_state->lut3d->base.id : 0; 1773 } else if (property == adev->mode_info.plane_blend_lut_property) { 1774 *val = (dm_plane_state->blend_lut) ? 1775 dm_plane_state->blend_lut->base.id : 0; 1776 } else if (property == adev->mode_info.plane_blend_tf_property) { 1777 *val = dm_plane_state->blend_tf; 1778 1779 } else { 1780 return -EINVAL; 1781 } 1782 1783 return 0; 1784 } 1785 #endif 1786 1787 static const struct drm_plane_funcs dm_plane_funcs = { 1788 .update_plane = drm_atomic_helper_update_plane, 1789 .disable_plane = drm_atomic_helper_disable_plane, 1790 .destroy = drm_plane_helper_destroy, 1791 .reset = amdgpu_dm_plane_drm_plane_reset, 1792 .atomic_duplicate_state = amdgpu_dm_plane_drm_plane_duplicate_state, 1793 .atomic_destroy_state = amdgpu_dm_plane_drm_plane_destroy_state, 1794 .format_mod_supported = amdgpu_dm_plane_format_mod_supported, 1795 #ifdef AMD_PRIVATE_COLOR 1796 .atomic_set_property = dm_atomic_plane_set_property, 1797 .atomic_get_property = dm_atomic_plane_get_property, 1798 #endif 1799 }; 1800 1801 int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, 1802 struct drm_plane *plane, 1803 unsigned long possible_crtcs, 1804 const struct dc_plane_cap *plane_cap) 1805 { 1806 uint32_t formats[32]; 1807 int num_formats; 1808 int res = -EPERM; 1809 unsigned int supported_rotations; 1810 uint64_t *modifiers = NULL; 1811 unsigned int primary_zpos = dm->dc->caps.max_slave_planes; 1812 1813 num_formats = amdgpu_dm_plane_get_plane_formats(plane, plane_cap, formats, 1814 ARRAY_SIZE(formats)); 1815 1816 res = amdgpu_dm_plane_get_plane_modifiers(dm->adev, plane->type, &modifiers); 1817 if (res) 1818 return res; 1819 1820 if (modifiers == NULL) 1821 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true; 1822 1823 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs, 1824 &dm_plane_funcs, formats, num_formats, 1825 modifiers, plane->type, NULL); 1826 kfree(modifiers); 1827 if (res) 1828 return res; 1829 1830 if (plane->type == DRM_PLANE_TYPE_OVERLAY && 1831 plane_cap && plane_cap->per_pixel_alpha) { 1832 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) | 1833 BIT(DRM_MODE_BLEND_PREMULTI) | 1834 BIT(DRM_MODE_BLEND_COVERAGE); 1835 1836 drm_plane_create_alpha_property(plane); 1837 drm_plane_create_blend_mode_property(plane, blend_caps); 1838 } 1839 1840 if (plane->type == DRM_PLANE_TYPE_PRIMARY) { 1841 /* 1842 * Allow OVERLAY planes to be used as underlays by assigning an 1843 * immutable zpos = # of OVERLAY planes to the PRIMARY plane. 1844 */ 1845 drm_plane_create_zpos_immutable_property(plane, primary_zpos); 1846 } else if (plane->type == DRM_PLANE_TYPE_OVERLAY) { 1847 /* 1848 * OVERLAY planes can be below or above the PRIMARY, but cannot 1849 * be above the CURSOR plane. 1850 */ 1851 unsigned int zpos = primary_zpos + 1 + drm_plane_index(plane); 1852 1853 drm_plane_create_zpos_property(plane, zpos, 0, 254); 1854 } else if (plane->type == DRM_PLANE_TYPE_CURSOR) { 1855 drm_plane_create_zpos_immutable_property(plane, 255); 1856 } 1857 1858 if (plane->type == DRM_PLANE_TYPE_PRIMARY && 1859 plane_cap && 1860 (plane_cap->pixel_format_support.nv12 || 1861 plane_cap->pixel_format_support.p010)) { 1862 /* This only affects YUV formats. */ 1863 drm_plane_create_color_properties( 1864 plane, 1865 BIT(DRM_COLOR_YCBCR_BT601) | 1866 BIT(DRM_COLOR_YCBCR_BT709) | 1867 BIT(DRM_COLOR_YCBCR_BT2020), 1868 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | 1869 BIT(DRM_COLOR_YCBCR_FULL_RANGE), 1870 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE); 1871 } 1872 1873 supported_rotations = 1874 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | 1875 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270; 1876 1877 if (dm->adev->asic_type >= CHIP_BONAIRE && 1878 plane->type != DRM_PLANE_TYPE_CURSOR) 1879 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, 1880 supported_rotations); 1881 1882 if (amdgpu_ip_version(dm->adev, DCE_HWIP, 0) > IP_VERSION(3, 0, 1) && 1883 plane->type != DRM_PLANE_TYPE_CURSOR) 1884 drm_plane_enable_fb_damage_clips(plane); 1885 1886 if (plane->type == DRM_PLANE_TYPE_PRIMARY) 1887 drm_plane_helper_add(plane, &dm_primary_plane_helper_funcs); 1888 else 1889 drm_plane_helper_add(plane, &dm_plane_helper_funcs); 1890 1891 #ifdef AMD_PRIVATE_COLOR 1892 dm_atomic_plane_attach_color_mgmt_properties(dm, plane); 1893 #endif 1894 /* Create (reset) the plane state */ 1895 if (plane->funcs->reset) 1896 plane->funcs->reset(plane); 1897 1898 return 0; 1899 } 1900 1901 bool amdgpu_dm_plane_is_video_format(uint32_t format) 1902 { 1903 int i; 1904 1905 for (i = 0; i < ARRAY_SIZE(video_formats); i++) 1906 if (format == video_formats[i]) 1907 return true; 1908 1909 return false; 1910 } 1911 1912