1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright 2022 Advanced Micro Devices, Inc. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 * OTHER DEALINGS IN THE SOFTWARE. 22 * 23 * Authors: AMD 24 * 25 */ 26 27 #include <drm/drm_atomic_helper.h> 28 #include <drm/drm_blend.h> 29 #include "drm/drm_framebuffer.h" 30 #include <drm/drm_gem_atomic_helper.h> 31 #include <drm/drm_plane_helper.h> 32 #include <drm/drm_gem_framebuffer_helper.h> 33 #include <drm/drm_fourcc.h> 34 35 #include "amdgpu.h" 36 #include "dal_asic_id.h" 37 #include "amdgpu_display.h" 38 #include "amdgpu_dm_trace.h" 39 #include "amdgpu_dm_plane.h" 40 #include "gc/gc_11_0_0_offset.h" 41 #include "gc/gc_11_0_0_sh_mask.h" 42 43 /* 44 * TODO: these are currently initialized to rgb formats only. 45 * For future use cases we should either initialize them dynamically based on 46 * plane capabilities, or initialize this array to all formats, so internal drm 47 * check will succeed, and let DC implement proper check 48 */ 49 static const uint32_t rgb_formats[] = { 50 DRM_FORMAT_XRGB8888, 51 DRM_FORMAT_ARGB8888, 52 DRM_FORMAT_RGBA8888, 53 DRM_FORMAT_XRGB2101010, 54 DRM_FORMAT_XBGR2101010, 55 DRM_FORMAT_ARGB2101010, 56 DRM_FORMAT_ABGR2101010, 57 DRM_FORMAT_XRGB16161616, 58 DRM_FORMAT_XBGR16161616, 59 DRM_FORMAT_ARGB16161616, 60 DRM_FORMAT_ABGR16161616, 61 DRM_FORMAT_XBGR8888, 62 DRM_FORMAT_ABGR8888, 63 DRM_FORMAT_RGB565, 64 }; 65 66 static const uint32_t overlay_formats[] = { 67 DRM_FORMAT_XRGB8888, 68 DRM_FORMAT_ARGB8888, 69 DRM_FORMAT_RGBA8888, 70 DRM_FORMAT_XBGR8888, 71 DRM_FORMAT_ABGR8888, 72 DRM_FORMAT_RGB565, 73 DRM_FORMAT_NV21, 74 DRM_FORMAT_NV12, 75 DRM_FORMAT_P010 76 }; 77 78 static const uint32_t video_formats[] = { 79 DRM_FORMAT_NV21, 80 DRM_FORMAT_NV12, 81 DRM_FORMAT_P010 82 }; 83 84 static const u32 cursor_formats[] = { 85 DRM_FORMAT_ARGB8888 86 }; 87 88 enum dm_micro_swizzle { 89 MICRO_SWIZZLE_Z = 0, 90 MICRO_SWIZZLE_S = 1, 91 MICRO_SWIZZLE_D = 2, 92 MICRO_SWIZZLE_R = 3 93 }; 94 95 const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd) 96 { 97 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]); 98 } 99 100 void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state, 101 bool *per_pixel_alpha, bool *pre_multiplied_alpha, 102 bool *global_alpha, int *global_alpha_value) 103 { 104 *per_pixel_alpha = false; 105 *pre_multiplied_alpha = true; 106 *global_alpha = false; 107 *global_alpha_value = 0xff; 108 109 110 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI || 111 plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) { 112 static const uint32_t alpha_formats[] = { 113 DRM_FORMAT_ARGB8888, 114 DRM_FORMAT_RGBA8888, 115 DRM_FORMAT_ABGR8888, 116 DRM_FORMAT_ARGB2101010, 117 DRM_FORMAT_ABGR2101010, 118 DRM_FORMAT_ARGB16161616, 119 DRM_FORMAT_ABGR16161616, 120 DRM_FORMAT_ARGB16161616F, 121 }; 122 uint32_t format = plane_state->fb->format->format; 123 unsigned int i; 124 125 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) { 126 if (format == alpha_formats[i]) { 127 *per_pixel_alpha = true; 128 break; 129 } 130 } 131 132 if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) 133 *pre_multiplied_alpha = false; 134 } 135 136 if (plane_state->alpha < 0xffff) { 137 *global_alpha = true; 138 *global_alpha_value = plane_state->alpha >> 8; 139 } 140 } 141 142 static void amdgpu_dm_plane_add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod) 143 { 144 if (!*mods) 145 return; 146 147 if (*cap - *size < 1) { 148 uint64_t new_cap = *cap * 2; 149 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL); 150 151 if (!new_mods) { 152 kfree(*mods); 153 *mods = NULL; 154 return; 155 } 156 157 memcpy(new_mods, *mods, sizeof(uint64_t) * *size); 158 kfree(*mods); 159 *mods = new_mods; 160 *cap = new_cap; 161 } 162 163 (*mods)[*size] = mod; 164 *size += 1; 165 } 166 167 static bool amdgpu_dm_plane_modifier_has_dcc(uint64_t modifier) 168 { 169 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier); 170 } 171 172 static unsigned int amdgpu_dm_plane_modifier_gfx9_swizzle_mode(uint64_t modifier) 173 { 174 if (modifier == DRM_FORMAT_MOD_LINEAR) 175 return 0; 176 177 return AMD_FMT_MOD_GET(TILE, modifier); 178 } 179 180 static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(struct dc_tiling_info *tiling_info, 181 uint64_t tiling_flags) 182 { 183 /* Fill GFX8 params */ 184 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) { 185 unsigned int bankw, bankh, mtaspect, tile_split, num_banks; 186 187 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); 188 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); 189 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); 190 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); 191 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); 192 193 /* XXX fix me for VI */ 194 tiling_info->gfx8.num_banks = num_banks; 195 tiling_info->gfx8.array_mode = 196 DC_ARRAY_2D_TILED_THIN1; 197 tiling_info->gfx8.tile_split = tile_split; 198 tiling_info->gfx8.bank_width = bankw; 199 tiling_info->gfx8.bank_height = bankh; 200 tiling_info->gfx8.tile_aspect = mtaspect; 201 tiling_info->gfx8.tile_mode = 202 DC_ADDR_SURF_MICRO_TILING_DISPLAY; 203 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) 204 == DC_ARRAY_1D_TILED_THIN1) { 205 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1; 206 } 207 208 tiling_info->gfx8.pipe_config = 209 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); 210 } 211 212 static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, 213 struct dc_tiling_info *tiling_info) 214 { 215 /* Fill GFX9 params */ 216 tiling_info->gfx9.num_pipes = 217 adev->gfx.config.gb_addr_config_fields.num_pipes; 218 tiling_info->gfx9.num_banks = 219 adev->gfx.config.gb_addr_config_fields.num_banks; 220 tiling_info->gfx9.pipe_interleave = 221 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size; 222 tiling_info->gfx9.num_shader_engines = 223 adev->gfx.config.gb_addr_config_fields.num_se; 224 tiling_info->gfx9.max_compressed_frags = 225 adev->gfx.config.gb_addr_config_fields.max_compress_frags; 226 tiling_info->gfx9.num_rb_per_se = 227 adev->gfx.config.gb_addr_config_fields.num_rb_per_se; 228 tiling_info->gfx9.shaderEnable = 1; 229 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) 230 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs; 231 } 232 233 static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev, 234 struct dc_tiling_info *tiling_info, 235 uint64_t modifier) 236 { 237 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier); 238 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier); 239 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier); 240 unsigned int pipes_log2; 241 242 pipes_log2 = min(5u, mod_pipe_xor_bits); 243 244 amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info); 245 246 if (!IS_AMD_FMT_MOD(modifier)) 247 return; 248 249 tiling_info->gfx9.num_pipes = 1u << pipes_log2; 250 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2); 251 252 if (adev->family >= AMDGPU_FAMILY_NV) { 253 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2; 254 } else { 255 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits; 256 257 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */ 258 } 259 } 260 261 static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev, 262 const enum surface_pixel_format format, 263 const enum dc_rotation_angle rotation, 264 const struct dc_tiling_info *tiling_info, 265 const struct dc_plane_dcc_param *dcc, 266 const struct dc_plane_address *address, 267 const struct plane_size *plane_size) 268 { 269 struct dc *dc = adev->dm.dc; 270 struct dc_dcc_surface_param input; 271 struct dc_surface_dcc_cap output; 272 273 memset(&input, 0, sizeof(input)); 274 memset(&output, 0, sizeof(output)); 275 276 if (!dcc->enable) 277 return 0; 278 279 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || 280 !dc->cap_funcs.get_dcc_compression_cap) 281 return -EINVAL; 282 283 input.format = format; 284 input.surface_size.width = plane_size->surface_size.width; 285 input.surface_size.height = plane_size->surface_size.height; 286 input.swizzle_mode = tiling_info->gfx9.swizzle; 287 288 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180) 289 input.scan = SCAN_DIRECTION_HORIZONTAL; 290 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270) 291 input.scan = SCAN_DIRECTION_VERTICAL; 292 293 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output)) 294 return -EINVAL; 295 296 if (!output.capable) 297 return -EINVAL; 298 299 if (dcc->independent_64b_blks == 0 && 300 output.grph.rgb.independent_64b_blks != 0) 301 return -EINVAL; 302 303 return 0; 304 } 305 306 static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, 307 const struct amdgpu_framebuffer *afb, 308 const enum surface_pixel_format format, 309 const enum dc_rotation_angle rotation, 310 const struct plane_size *plane_size, 311 struct dc_tiling_info *tiling_info, 312 struct dc_plane_dcc_param *dcc, 313 struct dc_plane_address *address) 314 { 315 const uint64_t modifier = afb->base.modifier; 316 int ret = 0; 317 318 amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier); 319 tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier); 320 321 if (amdgpu_dm_plane_modifier_has_dcc(modifier)) { 322 uint64_t dcc_address = afb->address + afb->base.offsets[1]; 323 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier); 324 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier); 325 326 dcc->enable = 1; 327 dcc->meta_pitch = afb->base.pitches[1]; 328 dcc->independent_64b_blks = independent_64b_blks; 329 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) { 330 if (independent_64b_blks && independent_128b_blks) 331 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl; 332 else if (independent_128b_blks) 333 dcc->dcc_ind_blk = hubp_ind_block_128b; 334 else if (independent_64b_blks && !independent_128b_blks) 335 dcc->dcc_ind_blk = hubp_ind_block_64b; 336 else 337 dcc->dcc_ind_blk = hubp_ind_block_unconstrained; 338 } else { 339 if (independent_64b_blks) 340 dcc->dcc_ind_blk = hubp_ind_block_64b; 341 else 342 dcc->dcc_ind_blk = hubp_ind_block_unconstrained; 343 } 344 345 address->grph.meta_addr.low_part = lower_32_bits(dcc_address); 346 address->grph.meta_addr.high_part = upper_32_bits(dcc_address); 347 } 348 349 ret = amdgpu_dm_plane_validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size); 350 if (ret) 351 drm_dbg_kms(adev_to_drm(adev), "amdgpu_dm_plane_validate_dcc: returned error: %d\n", ret); 352 353 return ret; 354 } 355 356 static int amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(struct amdgpu_device *adev, 357 const struct amdgpu_framebuffer *afb, 358 const enum surface_pixel_format format, 359 const enum dc_rotation_angle rotation, 360 const struct plane_size *plane_size, 361 struct dc_tiling_info *tiling_info, 362 struct dc_plane_dcc_param *dcc, 363 struct dc_plane_address *address) 364 { 365 const uint64_t modifier = afb->base.modifier; 366 int ret = 0; 367 368 /* TODO: Most of this function shouldn't be needed on GFX12. */ 369 amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info); 370 371 tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier); 372 373 if (amdgpu_dm_plane_modifier_has_dcc(modifier)) { 374 int max_compressed_block = AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier); 375 376 dcc->enable = 1; 377 dcc->independent_64b_blks = max_compressed_block == 0; 378 379 if (max_compressed_block == 0) 380 dcc->dcc_ind_blk = hubp_ind_block_64b; 381 else if (max_compressed_block == 1) 382 dcc->dcc_ind_blk = hubp_ind_block_128b; 383 else 384 dcc->dcc_ind_blk = hubp_ind_block_unconstrained; 385 } 386 387 /* TODO: This seems wrong because there is no DCC plane on GFX12. */ 388 ret = amdgpu_dm_plane_validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size); 389 if (ret) 390 drm_dbg_kms(adev_to_drm(adev), "amdgpu_dm_plane_validate_dcc: returned error: %d\n", ret); 391 392 return ret; 393 } 394 395 static void amdgpu_dm_plane_add_gfx10_1_modifiers(const struct amdgpu_device *adev, 396 uint64_t **mods, 397 uint64_t *size, 398 uint64_t *capacity) 399 { 400 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); 401 402 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 403 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 404 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 405 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 406 AMD_FMT_MOD_SET(DCC, 1) | 407 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 408 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 409 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 410 411 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 412 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 413 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 414 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 415 AMD_FMT_MOD_SET(DCC, 1) | 416 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 417 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 418 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 419 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 420 421 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 422 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 423 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 424 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); 425 426 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 427 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 428 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 429 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); 430 431 432 /* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */ 433 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 434 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | 435 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 436 437 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 438 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | 439 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 440 } 441 442 static void amdgpu_dm_plane_add_gfx9_modifiers(const struct amdgpu_device *adev, 443 uint64_t **mods, 444 uint64_t *size, 445 uint64_t *capacity) 446 { 447 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); 448 int pipe_xor_bits = min(8, pipes + 449 ilog2(adev->gfx.config.gb_addr_config_fields.num_se)); 450 int bank_xor_bits = min(8 - pipe_xor_bits, 451 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks)); 452 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) + 453 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se); 454 455 456 if (adev->family == AMDGPU_FAMILY_RV) { 457 /* Raven2 and later */ 458 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81; 459 460 /* 461 * No _D DCC swizzles yet because we only allow 32bpp, which 462 * doesn't support _D on DCN 463 */ 464 465 if (has_constant_encode) { 466 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 467 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 468 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 469 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 470 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 471 AMD_FMT_MOD_SET(DCC, 1) | 472 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 473 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 474 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1)); 475 } 476 477 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 478 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 479 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 480 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 481 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 482 AMD_FMT_MOD_SET(DCC, 1) | 483 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 484 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 485 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0)); 486 487 if (has_constant_encode) { 488 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 489 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 490 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 491 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 492 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 493 AMD_FMT_MOD_SET(DCC, 1) | 494 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 495 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 496 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 497 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 498 AMD_FMT_MOD_SET(RB, rb) | 499 AMD_FMT_MOD_SET(PIPE, pipes)); 500 } 501 502 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 503 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 504 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 505 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 506 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 507 AMD_FMT_MOD_SET(DCC, 1) | 508 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 509 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 510 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 511 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) | 512 AMD_FMT_MOD_SET(RB, rb) | 513 AMD_FMT_MOD_SET(PIPE, pipes)); 514 } 515 516 /* 517 * Only supported for 64bpp on Raven, will be filtered on format in 518 * amdgpu_dm_plane_format_mod_supported. 519 */ 520 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 521 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) | 522 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 523 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 524 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); 525 526 if (adev->family == AMDGPU_FAMILY_RV) { 527 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 528 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 529 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 530 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 531 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); 532 } 533 534 /* 535 * Only supported for 64bpp on Raven, will be filtered on format in 536 * amdgpu_dm_plane_format_mod_supported. 537 */ 538 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 539 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | 540 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 541 542 if (adev->family == AMDGPU_FAMILY_RV) { 543 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 544 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | 545 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 546 } 547 } 548 549 static void amdgpu_dm_plane_add_gfx10_3_modifiers(const struct amdgpu_device *adev, 550 uint64_t **mods, 551 uint64_t *size, 552 uint64_t *capacity) 553 { 554 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); 555 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs); 556 557 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 558 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 559 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 560 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 561 AMD_FMT_MOD_SET(PACKERS, pkrs) | 562 AMD_FMT_MOD_SET(DCC, 1) | 563 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 564 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 565 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 566 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 567 568 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 569 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 570 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 571 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 572 AMD_FMT_MOD_SET(PACKERS, pkrs) | 573 AMD_FMT_MOD_SET(DCC, 1) | 574 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 575 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 576 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); 577 578 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 579 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 580 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 581 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 582 AMD_FMT_MOD_SET(PACKERS, pkrs) | 583 AMD_FMT_MOD_SET(DCC, 1) | 584 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 585 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 586 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 587 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 588 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 589 590 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 591 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 592 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 593 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 594 AMD_FMT_MOD_SET(PACKERS, pkrs) | 595 AMD_FMT_MOD_SET(DCC, 1) | 596 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 597 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 598 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 599 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); 600 601 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 602 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 603 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 604 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 605 AMD_FMT_MOD_SET(PACKERS, pkrs)); 606 607 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 608 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 609 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 610 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 611 AMD_FMT_MOD_SET(PACKERS, pkrs)); 612 613 /* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */ 614 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 615 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | 616 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 617 618 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 619 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | 620 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 621 } 622 623 static void amdgpu_dm_plane_add_gfx11_modifiers(struct amdgpu_device *adev, 624 uint64_t **mods, uint64_t *size, uint64_t *capacity) 625 { 626 int num_pipes = 0; 627 int pipe_xor_bits = 0; 628 int num_pkrs = 0; 629 int pkrs = 0; 630 u32 gb_addr_config; 631 u8 i = 0; 632 unsigned int swizzle_r_x; 633 uint64_t modifier_r_x; 634 uint64_t modifier_dcc_best; 635 uint64_t modifier_dcc_4k; 636 637 /* TODO: GFX11 IP HW init hasnt finish and we get zero if we read from 638 * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes} 639 */ 640 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG); 641 ASSERT(gb_addr_config != 0); 642 643 num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS); 644 pkrs = ilog2(num_pkrs); 645 num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES); 646 pipe_xor_bits = ilog2(num_pipes); 647 648 for (i = 0; i < 2; i++) { 649 /* Insert the best one first. */ 650 /* R_X swizzle modes are the best for rendering and DCC requires them. */ 651 if (num_pipes > 16) 652 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X; 653 else 654 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X; 655 656 modifier_r_x = AMD_FMT_MOD | 657 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) | 658 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 659 AMD_FMT_MOD_SET(TILE, swizzle_r_x) | 660 AMD_FMT_MOD_SET(PACKERS, pkrs); 661 662 /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */ 663 modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) | 664 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) | 665 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 666 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B); 667 668 /* DCC settings for 4K and greater resolutions. (required by display hw) */ 669 modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) | 670 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 671 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 672 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B); 673 674 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best); 675 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k); 676 677 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1)); 678 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1)); 679 680 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_r_x); 681 } 682 683 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | 684 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) | 685 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D)); 686 } 687 688 static void amdgpu_dm_plane_add_gfx12_modifiers(struct amdgpu_device *adev, 689 uint64_t **mods, uint64_t *size, uint64_t *capacity) 690 { 691 uint64_t ver = AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX12); 692 uint64_t mod_256k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256K_2D); 693 uint64_t mod_64k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_64K_2D); 694 uint64_t mod_4k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_4K_2D); 695 uint64_t mod_256b = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256B_2D); 696 uint64_t dcc = ver | AMD_FMT_MOD_SET(DCC, 1); 697 uint8_t max_comp_block[] = {1, 0}; 698 uint64_t max_comp_block_mod[ARRAY_SIZE(max_comp_block)] = {0}; 699 uint8_t i = 0, j = 0; 700 uint64_t gfx12_modifiers[] = {mod_256k, mod_64k, mod_4k, mod_256b, DRM_FORMAT_MOD_LINEAR}; 701 702 for (i = 0; i < ARRAY_SIZE(max_comp_block); i++) 703 max_comp_block_mod[i] = AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_comp_block[i]); 704 705 /* With DCC: Best choice should be kept first. Hence, add all 256k modifiers of different 706 * max compressed blocks first and then move on to the next smaller sized layouts. 707 * Do not add the linear modifier here, and hence the condition of size-1 for the loop 708 */ 709 for (j = 0; j < ARRAY_SIZE(gfx12_modifiers) - 1; j++) 710 for (i = 0; i < ARRAY_SIZE(max_comp_block); i++) 711 amdgpu_dm_plane_add_modifier(mods, size, capacity, 712 ver | dcc | max_comp_block_mod[i] | gfx12_modifiers[j]); 713 714 /* Without DCC. Add all modifiers including linear at the end */ 715 for (i = 0; i < ARRAY_SIZE(gfx12_modifiers); i++) 716 amdgpu_dm_plane_add_modifier(mods, size, capacity, gfx12_modifiers[i]); 717 718 } 719 720 static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods) 721 { 722 uint64_t size = 0, capacity = 128; 723 *mods = NULL; 724 725 /* We have not hooked up any pre-GFX9 modifiers. */ 726 if (adev->family < AMDGPU_FAMILY_AI) 727 return 0; 728 729 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL); 730 731 if (plane_type == DRM_PLANE_TYPE_CURSOR) { 732 amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); 733 amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); 734 return *mods ? 0 : -ENOMEM; 735 } 736 737 switch (adev->family) { 738 case AMDGPU_FAMILY_AI: 739 case AMDGPU_FAMILY_RV: 740 amdgpu_dm_plane_add_gfx9_modifiers(adev, mods, &size, &capacity); 741 break; 742 case AMDGPU_FAMILY_NV: 743 case AMDGPU_FAMILY_VGH: 744 case AMDGPU_FAMILY_YC: 745 case AMDGPU_FAMILY_GC_10_3_6: 746 case AMDGPU_FAMILY_GC_10_3_7: 747 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) 748 amdgpu_dm_plane_add_gfx10_3_modifiers(adev, mods, &size, &capacity); 749 else 750 amdgpu_dm_plane_add_gfx10_1_modifiers(adev, mods, &size, &capacity); 751 break; 752 case AMDGPU_FAMILY_GC_11_0_0: 753 case AMDGPU_FAMILY_GC_11_0_1: 754 case AMDGPU_FAMILY_GC_11_5_0: 755 amdgpu_dm_plane_add_gfx11_modifiers(adev, mods, &size, &capacity); 756 break; 757 case AMDGPU_FAMILY_GC_12_0_0: 758 amdgpu_dm_plane_add_gfx12_modifiers(adev, mods, &size, &capacity); 759 break; 760 } 761 762 amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); 763 764 /* INVALID marks the end of the list. */ 765 amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); 766 767 if (!*mods) 768 return -ENOMEM; 769 770 return 0; 771 } 772 773 static int amdgpu_dm_plane_get_plane_formats(const struct drm_plane *plane, 774 const struct dc_plane_cap *plane_cap, 775 uint32_t *formats, int max_formats) 776 { 777 int i, num_formats = 0; 778 779 /* 780 * TODO: Query support for each group of formats directly from 781 * DC plane caps. This will require adding more formats to the 782 * caps list. 783 */ 784 785 if (plane->type == DRM_PLANE_TYPE_PRIMARY || 786 (plane_cap && plane_cap->type == DC_PLANE_TYPE_DCN_UNIVERSAL && plane->type != DRM_PLANE_TYPE_CURSOR)) { 787 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) { 788 if (num_formats >= max_formats) 789 break; 790 791 formats[num_formats++] = rgb_formats[i]; 792 } 793 794 if (plane_cap && plane_cap->pixel_format_support.nv12) 795 formats[num_formats++] = DRM_FORMAT_NV12; 796 if (plane_cap && plane_cap->pixel_format_support.p010) 797 formats[num_formats++] = DRM_FORMAT_P010; 798 if (plane_cap && plane_cap->pixel_format_support.fp16) { 799 formats[num_formats++] = DRM_FORMAT_XRGB16161616F; 800 formats[num_formats++] = DRM_FORMAT_ARGB16161616F; 801 formats[num_formats++] = DRM_FORMAT_XBGR16161616F; 802 formats[num_formats++] = DRM_FORMAT_ABGR16161616F; 803 } 804 } else { 805 switch (plane->type) { 806 case DRM_PLANE_TYPE_OVERLAY: 807 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) { 808 if (num_formats >= max_formats) 809 break; 810 811 formats[num_formats++] = overlay_formats[i]; 812 } 813 break; 814 815 case DRM_PLANE_TYPE_CURSOR: 816 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) { 817 if (num_formats >= max_formats) 818 break; 819 820 formats[num_formats++] = cursor_formats[i]; 821 } 822 break; 823 824 default: 825 break; 826 } 827 } 828 829 return num_formats; 830 } 831 832 int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev, 833 const struct amdgpu_framebuffer *afb, 834 const enum surface_pixel_format format, 835 const enum dc_rotation_angle rotation, 836 const uint64_t tiling_flags, 837 struct dc_tiling_info *tiling_info, 838 struct plane_size *plane_size, 839 struct dc_plane_dcc_param *dcc, 840 struct dc_plane_address *address, 841 bool tmz_surface) 842 { 843 const struct drm_framebuffer *fb = &afb->base; 844 int ret; 845 846 memset(tiling_info, 0, sizeof(*tiling_info)); 847 memset(plane_size, 0, sizeof(*plane_size)); 848 memset(dcc, 0, sizeof(*dcc)); 849 memset(address, 0, sizeof(*address)); 850 851 address->tmz_surface = tmz_surface; 852 853 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { 854 uint64_t addr = afb->address + fb->offsets[0]; 855 856 plane_size->surface_size.x = 0; 857 plane_size->surface_size.y = 0; 858 plane_size->surface_size.width = fb->width; 859 plane_size->surface_size.height = fb->height; 860 plane_size->surface_pitch = 861 fb->pitches[0] / fb->format->cpp[0]; 862 863 address->type = PLN_ADDR_TYPE_GRAPHICS; 864 address->grph.addr.low_part = lower_32_bits(addr); 865 address->grph.addr.high_part = upper_32_bits(addr); 866 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) { 867 uint64_t luma_addr = afb->address + fb->offsets[0]; 868 uint64_t chroma_addr = afb->address + fb->offsets[1]; 869 870 plane_size->surface_size.x = 0; 871 plane_size->surface_size.y = 0; 872 plane_size->surface_size.width = fb->width; 873 plane_size->surface_size.height = fb->height; 874 plane_size->surface_pitch = 875 fb->pitches[0] / fb->format->cpp[0]; 876 877 plane_size->chroma_size.x = 0; 878 plane_size->chroma_size.y = 0; 879 /* TODO: set these based on surface format */ 880 plane_size->chroma_size.width = fb->width / 2; 881 plane_size->chroma_size.height = fb->height / 2; 882 883 plane_size->chroma_pitch = 884 fb->pitches[1] / fb->format->cpp[1]; 885 886 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE; 887 address->video_progressive.luma_addr.low_part = 888 lower_32_bits(luma_addr); 889 address->video_progressive.luma_addr.high_part = 890 upper_32_bits(luma_addr); 891 address->video_progressive.chroma_addr.low_part = 892 lower_32_bits(chroma_addr); 893 address->video_progressive.chroma_addr.high_part = 894 upper_32_bits(chroma_addr); 895 } 896 897 if (adev->family >= AMDGPU_FAMILY_GC_12_0_0) { 898 ret = amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(adev, afb, format, 899 rotation, plane_size, 900 tiling_info, dcc, 901 address); 902 if (ret) 903 return ret; 904 } else if (adev->family >= AMDGPU_FAMILY_AI) { 905 ret = amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(adev, afb, format, 906 rotation, plane_size, 907 tiling_info, dcc, 908 address); 909 if (ret) 910 return ret; 911 } else { 912 amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags); 913 } 914 915 return 0; 916 } 917 918 static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane, 919 struct drm_plane_state *new_state) 920 { 921 struct amdgpu_framebuffer *afb; 922 struct drm_gem_object *obj; 923 struct amdgpu_device *adev; 924 struct amdgpu_bo *rbo; 925 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old; 926 uint32_t domain; 927 int r; 928 929 if (!new_state->fb) { 930 DRM_DEBUG_KMS("No FB bound\n"); 931 return 0; 932 } 933 934 afb = to_amdgpu_framebuffer(new_state->fb); 935 obj = drm_gem_fb_get_obj(new_state->fb, 0); 936 if (!obj) { 937 DRM_ERROR("Failed to get obj from framebuffer\n"); 938 return -EINVAL; 939 } 940 941 rbo = gem_to_amdgpu_bo(obj); 942 adev = amdgpu_ttm_adev(rbo->tbo.bdev); 943 r = amdgpu_bo_reserve(rbo, true); 944 if (r) { 945 dev_err(adev->dev, "fail to reserve bo (%d)\n", r); 946 return r; 947 } 948 949 r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1); 950 if (r) { 951 dev_err(adev->dev, "reserving fence slot failed (%d)\n", r); 952 goto error_unlock; 953 } 954 955 if (plane->type != DRM_PLANE_TYPE_CURSOR) 956 domain = amdgpu_display_supported_domains(adev, rbo->flags); 957 else 958 domain = AMDGPU_GEM_DOMAIN_VRAM; 959 960 rbo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; 961 r = amdgpu_bo_pin(rbo, domain); 962 if (unlikely(r != 0)) { 963 if (r != -ERESTARTSYS) 964 DRM_ERROR("Failed to pin framebuffer with error %d\n", r); 965 goto error_unlock; 966 } 967 968 r = amdgpu_ttm_alloc_gart(&rbo->tbo); 969 if (unlikely(r != 0)) { 970 DRM_ERROR("%p bind failed\n", rbo); 971 goto error_unpin; 972 } 973 974 r = drm_gem_plane_helper_prepare_fb(plane, new_state); 975 if (unlikely(r != 0)) 976 goto error_unpin; 977 978 amdgpu_bo_unreserve(rbo); 979 980 afb->address = amdgpu_bo_gpu_offset(rbo); 981 982 amdgpu_bo_ref(rbo); 983 984 /** 985 * We don't do surface updates on planes that have been newly created, 986 * but we also don't have the afb->address during atomic check. 987 * 988 * Fill in buffer attributes depending on the address here, but only on 989 * newly created planes since they're not being used by DC yet and this 990 * won't modify global state. 991 */ 992 dm_plane_state_old = to_dm_plane_state(plane->state); 993 dm_plane_state_new = to_dm_plane_state(new_state); 994 995 if (dm_plane_state_new->dc_state && 996 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { 997 struct dc_plane_state *plane_state = 998 dm_plane_state_new->dc_state; 999 1000 amdgpu_dm_plane_fill_plane_buffer_attributes( 1001 adev, afb, plane_state->format, plane_state->rotation, 1002 afb->tiling_flags, 1003 &plane_state->tiling_info, &plane_state->plane_size, 1004 &plane_state->dcc, &plane_state->address, 1005 afb->tmz_surface); 1006 } 1007 1008 return 0; 1009 1010 error_unpin: 1011 amdgpu_bo_unpin(rbo); 1012 1013 error_unlock: 1014 amdgpu_bo_unreserve(rbo); 1015 return r; 1016 } 1017 1018 static void amdgpu_dm_plane_helper_cleanup_fb(struct drm_plane *plane, 1019 struct drm_plane_state *old_state) 1020 { 1021 struct amdgpu_bo *rbo; 1022 int r; 1023 1024 if (!old_state->fb) 1025 return; 1026 1027 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]); 1028 r = amdgpu_bo_reserve(rbo, false); 1029 if (unlikely(r)) { 1030 DRM_ERROR("failed to reserve rbo before unpin\n"); 1031 return; 1032 } 1033 1034 amdgpu_bo_unpin(rbo); 1035 amdgpu_bo_unreserve(rbo); 1036 amdgpu_bo_unref(&rbo); 1037 } 1038 1039 static void amdgpu_dm_plane_get_min_max_dc_plane_scaling(struct drm_device *dev, 1040 struct drm_framebuffer *fb, 1041 int *min_downscale, int *max_upscale) 1042 { 1043 struct amdgpu_device *adev = drm_to_adev(dev); 1044 struct dc *dc = adev->dm.dc; 1045 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */ 1046 struct dc_plane_cap *plane_cap = &dc->caps.planes[0]; 1047 1048 switch (fb->format->format) { 1049 case DRM_FORMAT_P010: 1050 case DRM_FORMAT_NV12: 1051 case DRM_FORMAT_NV21: 1052 *max_upscale = plane_cap->max_upscale_factor.nv12; 1053 *min_downscale = plane_cap->max_downscale_factor.nv12; 1054 break; 1055 1056 case DRM_FORMAT_XRGB16161616F: 1057 case DRM_FORMAT_ARGB16161616F: 1058 case DRM_FORMAT_XBGR16161616F: 1059 case DRM_FORMAT_ABGR16161616F: 1060 *max_upscale = plane_cap->max_upscale_factor.fp16; 1061 *min_downscale = plane_cap->max_downscale_factor.fp16; 1062 break; 1063 1064 default: 1065 *max_upscale = plane_cap->max_upscale_factor.argb8888; 1066 *min_downscale = plane_cap->max_downscale_factor.argb8888; 1067 break; 1068 } 1069 1070 /* 1071 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a 1072 * scaling factor of 1.0 == 1000 units. 1073 */ 1074 if (*max_upscale == 1) 1075 *max_upscale = 1000; 1076 1077 if (*min_downscale == 1) 1078 *min_downscale = 1000; 1079 } 1080 1081 int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state, 1082 struct drm_crtc_state *new_crtc_state) 1083 { 1084 struct drm_framebuffer *fb = state->fb; 1085 int min_downscale, max_upscale; 1086 int min_scale = 0; 1087 int max_scale = INT_MAX; 1088 1089 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */ 1090 if (fb && state->crtc) { 1091 /* Validate viewport to cover the case when only the position changes */ 1092 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) { 1093 int viewport_width = state->crtc_w; 1094 int viewport_height = state->crtc_h; 1095 1096 if (state->crtc_x < 0) 1097 viewport_width += state->crtc_x; 1098 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay) 1099 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x; 1100 1101 if (state->crtc_y < 0) 1102 viewport_height += state->crtc_y; 1103 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay) 1104 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y; 1105 1106 if (viewport_width < 0 || viewport_height < 0) { 1107 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n"); 1108 return -EINVAL; 1109 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */ 1110 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2); 1111 return -EINVAL; 1112 } else if (viewport_height < MIN_VIEWPORT_SIZE) { 1113 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE); 1114 return -EINVAL; 1115 } 1116 1117 } 1118 1119 /* Get min/max allowed scaling factors from plane caps. */ 1120 amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->crtc->dev, fb, 1121 &min_downscale, &max_upscale); 1122 /* 1123 * Convert to drm convention: 16.16 fixed point, instead of dc's 1124 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's 1125 * dst/src, so min_scale = 1.0 / max_upscale, etc. 1126 */ 1127 min_scale = (1000 << 16) / max_upscale; 1128 max_scale = (1000 << 16) / min_downscale; 1129 } 1130 1131 return drm_atomic_helper_check_plane_state( 1132 state, new_crtc_state, min_scale, max_scale, true, true); 1133 } 1134 1135 int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev, 1136 const struct drm_plane_state *state, 1137 struct dc_scaling_info *scaling_info) 1138 { 1139 int scale_w, scale_h, min_downscale, max_upscale; 1140 1141 memset(scaling_info, 0, sizeof(*scaling_info)); 1142 1143 /* Source is fixed 16.16 but we ignore mantissa for now... */ 1144 scaling_info->src_rect.x = state->src_x >> 16; 1145 scaling_info->src_rect.y = state->src_y >> 16; 1146 1147 /* 1148 * For reasons we don't (yet) fully understand a non-zero 1149 * src_y coordinate into an NV12 buffer can cause a 1150 * system hang on DCN1x. 1151 * To avoid hangs (and maybe be overly cautious) 1152 * let's reject both non-zero src_x and src_y. 1153 * 1154 * We currently know of only one use-case to reproduce a 1155 * scenario with non-zero src_x and src_y for NV12, which 1156 * is to gesture the YouTube Android app into full screen 1157 * on ChromeOS. 1158 */ 1159 if (((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) || 1160 (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) && 1161 (state->fb && state->fb->format->format == DRM_FORMAT_NV12 && 1162 (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0))) 1163 return -EINVAL; 1164 1165 scaling_info->src_rect.width = state->src_w >> 16; 1166 if (scaling_info->src_rect.width == 0) 1167 return -EINVAL; 1168 1169 scaling_info->src_rect.height = state->src_h >> 16; 1170 if (scaling_info->src_rect.height == 0) 1171 return -EINVAL; 1172 1173 scaling_info->dst_rect.x = state->crtc_x; 1174 scaling_info->dst_rect.y = state->crtc_y; 1175 1176 if (state->crtc_w == 0) 1177 return -EINVAL; 1178 1179 scaling_info->dst_rect.width = state->crtc_w; 1180 1181 if (state->crtc_h == 0) 1182 return -EINVAL; 1183 1184 scaling_info->dst_rect.height = state->crtc_h; 1185 1186 /* DRM doesn't specify clipping on destination output. */ 1187 scaling_info->clip_rect = scaling_info->dst_rect; 1188 1189 /* Validate scaling per-format with DC plane caps */ 1190 if (state->plane && state->plane->dev && state->fb) { 1191 amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->plane->dev, state->fb, 1192 &min_downscale, &max_upscale); 1193 } else { 1194 min_downscale = 250; 1195 max_upscale = 16000; 1196 } 1197 1198 scale_w = scaling_info->dst_rect.width * 1000 / 1199 scaling_info->src_rect.width; 1200 1201 if (scale_w < min_downscale || scale_w > max_upscale) 1202 return -EINVAL; 1203 1204 scale_h = scaling_info->dst_rect.height * 1000 / 1205 scaling_info->src_rect.height; 1206 1207 if (scale_h < min_downscale || scale_h > max_upscale) 1208 return -EINVAL; 1209 1210 /* 1211 * The "scaling_quality" can be ignored for now, quality = 0 has DC 1212 * assume reasonable defaults based on the format. 1213 */ 1214 1215 return 0; 1216 } 1217 1218 static int amdgpu_dm_plane_atomic_check(struct drm_plane *plane, 1219 struct drm_atomic_state *state) 1220 { 1221 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, 1222 plane); 1223 struct amdgpu_device *adev = drm_to_adev(plane->dev); 1224 struct dc *dc = adev->dm.dc; 1225 struct dm_plane_state *dm_plane_state; 1226 struct dc_scaling_info scaling_info; 1227 struct drm_crtc_state *new_crtc_state; 1228 int ret; 1229 1230 trace_amdgpu_dm_plane_atomic_check(new_plane_state); 1231 1232 dm_plane_state = to_dm_plane_state(new_plane_state); 1233 1234 if (!dm_plane_state->dc_state) 1235 return 0; 1236 1237 new_crtc_state = 1238 drm_atomic_get_new_crtc_state(state, 1239 new_plane_state->crtc); 1240 if (!new_crtc_state) 1241 return -EINVAL; 1242 1243 ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state); 1244 if (ret) 1245 return ret; 1246 1247 ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, new_plane_state, &scaling_info); 1248 if (ret) 1249 return ret; 1250 1251 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK) 1252 return 0; 1253 1254 return -EINVAL; 1255 } 1256 1257 static int amdgpu_dm_plane_atomic_async_check(struct drm_plane *plane, 1258 struct drm_atomic_state *state) 1259 { 1260 struct drm_crtc_state *new_crtc_state; 1261 struct drm_plane_state *new_plane_state; 1262 struct dm_crtc_state *dm_new_crtc_state; 1263 1264 /* Only support async updates on cursor planes. */ 1265 if (plane->type != DRM_PLANE_TYPE_CURSOR) 1266 return -EINVAL; 1267 1268 new_plane_state = drm_atomic_get_new_plane_state(state, plane); 1269 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc); 1270 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 1271 /* Reject overlay cursors for now*/ 1272 if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE) 1273 return -EINVAL; 1274 1275 return 0; 1276 } 1277 1278 int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, 1279 struct dc_cursor_position *position) 1280 { 1281 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1282 struct amdgpu_device *adev = drm_to_adev(plane->dev); 1283 int x, y; 1284 int xorigin = 0, yorigin = 0; 1285 1286 if (!crtc || !plane->state->fb) 1287 return 0; 1288 1289 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) || 1290 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) { 1291 DRM_ERROR("%s: bad cursor width or height %d x %d\n", 1292 __func__, 1293 plane->state->crtc_w, 1294 plane->state->crtc_h); 1295 return -EINVAL; 1296 } 1297 1298 x = plane->state->crtc_x; 1299 y = plane->state->crtc_y; 1300 1301 if (x <= -amdgpu_crtc->max_cursor_width || 1302 y <= -amdgpu_crtc->max_cursor_height) 1303 return 0; 1304 1305 if (x < 0) { 1306 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); 1307 x = 0; 1308 } 1309 if (y < 0) { 1310 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); 1311 y = 0; 1312 } 1313 position->enable = true; 1314 position->x = x; 1315 position->y = y; 1316 position->x_hotspot = xorigin; 1317 position->y_hotspot = yorigin; 1318 1319 if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(4, 0, 1)) 1320 position->translate_by_source = true; 1321 1322 return 0; 1323 } 1324 1325 void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane, 1326 struct drm_plane_state *old_plane_state) 1327 { 1328 struct amdgpu_device *adev = drm_to_adev(plane->dev); 1329 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb); 1330 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc; 1331 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL; 1332 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1333 uint64_t address = afb ? afb->address : 0; 1334 struct dc_cursor_position position = {0}; 1335 struct dc_cursor_attributes attributes; 1336 int ret; 1337 1338 if (!plane->state->fb && !old_plane_state->fb) 1339 return; 1340 1341 drm_dbg_atomic(plane->dev, "crtc_id=%d with size %d to %d\n", 1342 amdgpu_crtc->crtc_id, plane->state->crtc_w, 1343 plane->state->crtc_h); 1344 1345 ret = amdgpu_dm_plane_get_cursor_position(plane, crtc, &position); 1346 if (ret) 1347 return; 1348 1349 if (!position.enable) { 1350 /* turn off cursor */ 1351 if (crtc_state && crtc_state->stream) { 1352 mutex_lock(&adev->dm.dc_lock); 1353 dc_stream_program_cursor_position(crtc_state->stream, 1354 &position); 1355 mutex_unlock(&adev->dm.dc_lock); 1356 } 1357 return; 1358 } 1359 1360 amdgpu_crtc->cursor_width = plane->state->crtc_w; 1361 amdgpu_crtc->cursor_height = plane->state->crtc_h; 1362 1363 memset(&attributes, 0, sizeof(attributes)); 1364 attributes.address.high_part = upper_32_bits(address); 1365 attributes.address.low_part = lower_32_bits(address); 1366 attributes.width = plane->state->crtc_w; 1367 attributes.height = plane->state->crtc_h; 1368 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA; 1369 attributes.rotation_angle = 0; 1370 attributes.attribute_flags.value = 0; 1371 1372 /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM 1373 * legacy gamma setup. 1374 */ 1375 if (crtc_state->cm_is_degamma_srgb && 1376 adev->dm.dc->caps.color.dpp.gamma_corr) 1377 attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1; 1378 1379 if (afb) 1380 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; 1381 1382 if (crtc_state->stream) { 1383 mutex_lock(&adev->dm.dc_lock); 1384 if (!dc_stream_program_cursor_attributes(crtc_state->stream, 1385 &attributes)) 1386 DRM_ERROR("DC failed to set cursor attributes\n"); 1387 1388 if (!dc_stream_program_cursor_position(crtc_state->stream, 1389 &position)) 1390 DRM_ERROR("DC failed to set cursor position\n"); 1391 mutex_unlock(&adev->dm.dc_lock); 1392 } 1393 } 1394 1395 static void amdgpu_dm_plane_atomic_async_update(struct drm_plane *plane, 1396 struct drm_atomic_state *state) 1397 { 1398 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, 1399 plane); 1400 struct drm_plane_state *old_state = 1401 drm_atomic_get_old_plane_state(state, plane); 1402 1403 trace_amdgpu_dm_atomic_update_cursor(new_state); 1404 1405 swap(plane->state->fb, new_state->fb); 1406 1407 plane->state->src_x = new_state->src_x; 1408 plane->state->src_y = new_state->src_y; 1409 plane->state->src_w = new_state->src_w; 1410 plane->state->src_h = new_state->src_h; 1411 plane->state->crtc_x = new_state->crtc_x; 1412 plane->state->crtc_y = new_state->crtc_y; 1413 plane->state->crtc_w = new_state->crtc_w; 1414 plane->state->crtc_h = new_state->crtc_h; 1415 1416 amdgpu_dm_plane_handle_cursor_update(plane, old_state); 1417 } 1418 1419 static void amdgpu_dm_plane_panic_flush(struct drm_plane *plane) 1420 { 1421 struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane->state); 1422 struct drm_framebuffer *fb = plane->state->fb; 1423 struct dc_plane_state *dc_plane_state; 1424 1425 if (!dm_plane_state || !dm_plane_state->dc_state) 1426 return; 1427 1428 dc_plane_state = dm_plane_state->dc_state; 1429 1430 dc_plane_force_update_for_panic(dc_plane_state, fb->modifier ? true : false); 1431 } 1432 1433 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { 1434 .prepare_fb = amdgpu_dm_plane_helper_prepare_fb, 1435 .cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb, 1436 .atomic_check = amdgpu_dm_plane_atomic_check, 1437 .atomic_async_check = amdgpu_dm_plane_atomic_async_check, 1438 .atomic_async_update = amdgpu_dm_plane_atomic_async_update 1439 }; 1440 1441 static const struct drm_plane_helper_funcs dm_primary_plane_helper_funcs = { 1442 .prepare_fb = amdgpu_dm_plane_helper_prepare_fb, 1443 .cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb, 1444 .atomic_check = amdgpu_dm_plane_atomic_check, 1445 .atomic_async_check = amdgpu_dm_plane_atomic_async_check, 1446 .atomic_async_update = amdgpu_dm_plane_atomic_async_update, 1447 .get_scanout_buffer = amdgpu_display_get_scanout_buffer, 1448 .panic_flush = amdgpu_dm_plane_panic_flush, 1449 }; 1450 1451 static void amdgpu_dm_plane_drm_plane_reset(struct drm_plane *plane) 1452 { 1453 struct dm_plane_state *amdgpu_state = NULL; 1454 1455 if (plane->state) 1456 plane->funcs->atomic_destroy_state(plane, plane->state); 1457 1458 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL); 1459 WARN_ON(amdgpu_state == NULL); 1460 1461 if (!amdgpu_state) 1462 return; 1463 1464 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base); 1465 amdgpu_state->degamma_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; 1466 amdgpu_state->hdr_mult = AMDGPU_HDR_MULT_DEFAULT; 1467 amdgpu_state->shaper_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; 1468 amdgpu_state->blend_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; 1469 } 1470 1471 static struct drm_plane_state *amdgpu_dm_plane_drm_plane_duplicate_state(struct drm_plane *plane) 1472 { 1473 struct dm_plane_state *dm_plane_state, *old_dm_plane_state; 1474 1475 old_dm_plane_state = to_dm_plane_state(plane->state); 1476 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL); 1477 if (!dm_plane_state) 1478 return NULL; 1479 1480 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base); 1481 1482 if (old_dm_plane_state->dc_state) { 1483 dm_plane_state->dc_state = old_dm_plane_state->dc_state; 1484 dc_plane_state_retain(dm_plane_state->dc_state); 1485 } 1486 1487 if (old_dm_plane_state->degamma_lut) 1488 dm_plane_state->degamma_lut = 1489 drm_property_blob_get(old_dm_plane_state->degamma_lut); 1490 if (old_dm_plane_state->ctm) 1491 dm_plane_state->ctm = 1492 drm_property_blob_get(old_dm_plane_state->ctm); 1493 if (old_dm_plane_state->shaper_lut) 1494 dm_plane_state->shaper_lut = 1495 drm_property_blob_get(old_dm_plane_state->shaper_lut); 1496 if (old_dm_plane_state->lut3d) 1497 dm_plane_state->lut3d = 1498 drm_property_blob_get(old_dm_plane_state->lut3d); 1499 if (old_dm_plane_state->blend_lut) 1500 dm_plane_state->blend_lut = 1501 drm_property_blob_get(old_dm_plane_state->blend_lut); 1502 1503 dm_plane_state->degamma_tf = old_dm_plane_state->degamma_tf; 1504 dm_plane_state->hdr_mult = old_dm_plane_state->hdr_mult; 1505 dm_plane_state->shaper_tf = old_dm_plane_state->shaper_tf; 1506 dm_plane_state->blend_tf = old_dm_plane_state->blend_tf; 1507 1508 return &dm_plane_state->base; 1509 } 1510 1511 static bool amdgpu_dm_plane_format_mod_supported(struct drm_plane *plane, 1512 uint32_t format, 1513 uint64_t modifier) 1514 { 1515 struct amdgpu_device *adev = drm_to_adev(plane->dev); 1516 const struct drm_format_info *info = drm_format_info(format); 1517 int i; 1518 1519 if (!info) 1520 return false; 1521 1522 /* 1523 * We always have to allow these modifiers: 1524 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers. 1525 * 2. Not passing any modifiers is the same as explicitly passing INVALID. 1526 */ 1527 if (modifier == DRM_FORMAT_MOD_LINEAR || 1528 modifier == DRM_FORMAT_MOD_INVALID) { 1529 return true; 1530 } 1531 1532 /* Check that the modifier is on the list of the plane's supported modifiers. */ 1533 for (i = 0; i < plane->modifier_count; i++) { 1534 if (modifier == plane->modifiers[i]) 1535 break; 1536 } 1537 if (i == plane->modifier_count) 1538 return false; 1539 1540 /* GFX12 doesn't have these limitations. */ 1541 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) <= AMD_FMT_MOD_TILE_VER_GFX11) { 1542 enum dm_micro_swizzle microtile = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier) & 3; 1543 1544 /* 1545 * For D swizzle the canonical modifier depends on the bpp, so check 1546 * it here. 1547 */ 1548 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 && 1549 adev->family >= AMDGPU_FAMILY_NV) { 1550 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4) 1551 return false; 1552 } 1553 1554 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D && 1555 info->cpp[0] < 8) 1556 return false; 1557 1558 if (amdgpu_dm_plane_modifier_has_dcc(modifier)) { 1559 /* Per radeonsi comments 16/64 bpp are more complicated. */ 1560 if (info->cpp[0] != 4) 1561 return false; 1562 /* We support multi-planar formats, but not when combined with 1563 * additional DCC metadata planes. 1564 */ 1565 if (info->num_planes > 1) 1566 return false; 1567 } 1568 } 1569 1570 return true; 1571 } 1572 1573 static void amdgpu_dm_plane_drm_plane_destroy_state(struct drm_plane *plane, 1574 struct drm_plane_state *state) 1575 { 1576 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); 1577 1578 if (dm_plane_state->degamma_lut) 1579 drm_property_blob_put(dm_plane_state->degamma_lut); 1580 if (dm_plane_state->ctm) 1581 drm_property_blob_put(dm_plane_state->ctm); 1582 if (dm_plane_state->lut3d) 1583 drm_property_blob_put(dm_plane_state->lut3d); 1584 if (dm_plane_state->shaper_lut) 1585 drm_property_blob_put(dm_plane_state->shaper_lut); 1586 if (dm_plane_state->blend_lut) 1587 drm_property_blob_put(dm_plane_state->blend_lut); 1588 1589 if (dm_plane_state->dc_state) 1590 dc_plane_state_release(dm_plane_state->dc_state); 1591 1592 drm_atomic_helper_plane_destroy_state(plane, state); 1593 } 1594 1595 #ifdef AMD_PRIVATE_COLOR 1596 static void 1597 dm_atomic_plane_attach_color_mgmt_properties(struct amdgpu_display_manager *dm, 1598 struct drm_plane *plane) 1599 { 1600 struct amdgpu_mode_info mode_info = dm->adev->mode_info; 1601 struct dpp_color_caps dpp_color_caps = dm->dc->caps.color.dpp; 1602 1603 /* Check HW color pipeline capabilities on DPP block (pre-blending) 1604 * before exposing related properties. 1605 */ 1606 if (dpp_color_caps.dgam_ram || dpp_color_caps.gamma_corr) { 1607 drm_object_attach_property(&plane->base, 1608 mode_info.plane_degamma_lut_property, 1609 0); 1610 drm_object_attach_property(&plane->base, 1611 mode_info.plane_degamma_lut_size_property, 1612 MAX_COLOR_LUT_ENTRIES); 1613 drm_object_attach_property(&plane->base, 1614 dm->adev->mode_info.plane_degamma_tf_property, 1615 AMDGPU_TRANSFER_FUNCTION_DEFAULT); 1616 } 1617 /* HDR MULT is always available */ 1618 drm_object_attach_property(&plane->base, 1619 dm->adev->mode_info.plane_hdr_mult_property, 1620 AMDGPU_HDR_MULT_DEFAULT); 1621 1622 /* Only enable plane CTM if both DPP and MPC gamut remap is available. */ 1623 if (dm->dc->caps.color.mpc.gamut_remap) 1624 drm_object_attach_property(&plane->base, 1625 dm->adev->mode_info.plane_ctm_property, 0); 1626 1627 if (dpp_color_caps.hw_3d_lut) { 1628 drm_object_attach_property(&plane->base, 1629 mode_info.plane_shaper_lut_property, 0); 1630 drm_object_attach_property(&plane->base, 1631 mode_info.plane_shaper_lut_size_property, 1632 MAX_COLOR_LUT_ENTRIES); 1633 drm_object_attach_property(&plane->base, 1634 mode_info.plane_shaper_tf_property, 1635 AMDGPU_TRANSFER_FUNCTION_DEFAULT); 1636 drm_object_attach_property(&plane->base, 1637 mode_info.plane_lut3d_property, 0); 1638 drm_object_attach_property(&plane->base, 1639 mode_info.plane_lut3d_size_property, 1640 MAX_COLOR_3DLUT_SIZE); 1641 } 1642 1643 if (dpp_color_caps.ogam_ram) { 1644 drm_object_attach_property(&plane->base, 1645 mode_info.plane_blend_lut_property, 0); 1646 drm_object_attach_property(&plane->base, 1647 mode_info.plane_blend_lut_size_property, 1648 MAX_COLOR_LUT_ENTRIES); 1649 drm_object_attach_property(&plane->base, 1650 mode_info.plane_blend_tf_property, 1651 AMDGPU_TRANSFER_FUNCTION_DEFAULT); 1652 } 1653 } 1654 1655 static int 1656 dm_atomic_plane_set_property(struct drm_plane *plane, 1657 struct drm_plane_state *state, 1658 struct drm_property *property, 1659 uint64_t val) 1660 { 1661 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); 1662 struct amdgpu_device *adev = drm_to_adev(plane->dev); 1663 bool replaced = false; 1664 int ret; 1665 1666 if (property == adev->mode_info.plane_degamma_lut_property) { 1667 ret = drm_property_replace_blob_from_id(plane->dev, 1668 &dm_plane_state->degamma_lut, 1669 val, -1, 1670 sizeof(struct drm_color_lut), 1671 &replaced); 1672 dm_plane_state->base.color_mgmt_changed |= replaced; 1673 return ret; 1674 } else if (property == adev->mode_info.plane_degamma_tf_property) { 1675 if (dm_plane_state->degamma_tf != val) { 1676 dm_plane_state->degamma_tf = val; 1677 dm_plane_state->base.color_mgmt_changed = 1; 1678 } 1679 } else if (property == adev->mode_info.plane_hdr_mult_property) { 1680 if (dm_plane_state->hdr_mult != val) { 1681 dm_plane_state->hdr_mult = val; 1682 dm_plane_state->base.color_mgmt_changed = 1; 1683 } 1684 } else if (property == adev->mode_info.plane_ctm_property) { 1685 ret = drm_property_replace_blob_from_id(plane->dev, 1686 &dm_plane_state->ctm, 1687 val, 1688 sizeof(struct drm_color_ctm_3x4), -1, 1689 &replaced); 1690 dm_plane_state->base.color_mgmt_changed |= replaced; 1691 return ret; 1692 } else if (property == adev->mode_info.plane_shaper_lut_property) { 1693 ret = drm_property_replace_blob_from_id(plane->dev, 1694 &dm_plane_state->shaper_lut, 1695 val, -1, 1696 sizeof(struct drm_color_lut), 1697 &replaced); 1698 dm_plane_state->base.color_mgmt_changed |= replaced; 1699 return ret; 1700 } else if (property == adev->mode_info.plane_shaper_tf_property) { 1701 if (dm_plane_state->shaper_tf != val) { 1702 dm_plane_state->shaper_tf = val; 1703 dm_plane_state->base.color_mgmt_changed = 1; 1704 } 1705 } else if (property == adev->mode_info.plane_lut3d_property) { 1706 ret = drm_property_replace_blob_from_id(plane->dev, 1707 &dm_plane_state->lut3d, 1708 val, -1, 1709 sizeof(struct drm_color_lut), 1710 &replaced); 1711 dm_plane_state->base.color_mgmt_changed |= replaced; 1712 return ret; 1713 } else if (property == adev->mode_info.plane_blend_lut_property) { 1714 ret = drm_property_replace_blob_from_id(plane->dev, 1715 &dm_plane_state->blend_lut, 1716 val, -1, 1717 sizeof(struct drm_color_lut), 1718 &replaced); 1719 dm_plane_state->base.color_mgmt_changed |= replaced; 1720 return ret; 1721 } else if (property == adev->mode_info.plane_blend_tf_property) { 1722 if (dm_plane_state->blend_tf != val) { 1723 dm_plane_state->blend_tf = val; 1724 dm_plane_state->base.color_mgmt_changed = 1; 1725 } 1726 } else { 1727 drm_dbg_atomic(plane->dev, 1728 "[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n", 1729 plane->base.id, plane->name, 1730 property->base.id, property->name); 1731 return -EINVAL; 1732 } 1733 1734 return 0; 1735 } 1736 1737 static int 1738 dm_atomic_plane_get_property(struct drm_plane *plane, 1739 const struct drm_plane_state *state, 1740 struct drm_property *property, 1741 uint64_t *val) 1742 { 1743 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); 1744 struct amdgpu_device *adev = drm_to_adev(plane->dev); 1745 1746 if (property == adev->mode_info.plane_degamma_lut_property) { 1747 *val = (dm_plane_state->degamma_lut) ? 1748 dm_plane_state->degamma_lut->base.id : 0; 1749 } else if (property == adev->mode_info.plane_degamma_tf_property) { 1750 *val = dm_plane_state->degamma_tf; 1751 } else if (property == adev->mode_info.plane_hdr_mult_property) { 1752 *val = dm_plane_state->hdr_mult; 1753 } else if (property == adev->mode_info.plane_ctm_property) { 1754 *val = (dm_plane_state->ctm) ? 1755 dm_plane_state->ctm->base.id : 0; 1756 } else if (property == adev->mode_info.plane_shaper_lut_property) { 1757 *val = (dm_plane_state->shaper_lut) ? 1758 dm_plane_state->shaper_lut->base.id : 0; 1759 } else if (property == adev->mode_info.plane_shaper_tf_property) { 1760 *val = dm_plane_state->shaper_tf; 1761 } else if (property == adev->mode_info.plane_lut3d_property) { 1762 *val = (dm_plane_state->lut3d) ? 1763 dm_plane_state->lut3d->base.id : 0; 1764 } else if (property == adev->mode_info.plane_blend_lut_property) { 1765 *val = (dm_plane_state->blend_lut) ? 1766 dm_plane_state->blend_lut->base.id : 0; 1767 } else if (property == adev->mode_info.plane_blend_tf_property) { 1768 *val = dm_plane_state->blend_tf; 1769 1770 } else { 1771 return -EINVAL; 1772 } 1773 1774 return 0; 1775 } 1776 #endif 1777 1778 static const struct drm_plane_funcs dm_plane_funcs = { 1779 .update_plane = drm_atomic_helper_update_plane, 1780 .disable_plane = drm_atomic_helper_disable_plane, 1781 .destroy = drm_plane_helper_destroy, 1782 .reset = amdgpu_dm_plane_drm_plane_reset, 1783 .atomic_duplicate_state = amdgpu_dm_plane_drm_plane_duplicate_state, 1784 .atomic_destroy_state = amdgpu_dm_plane_drm_plane_destroy_state, 1785 .format_mod_supported = amdgpu_dm_plane_format_mod_supported, 1786 #ifdef AMD_PRIVATE_COLOR 1787 .atomic_set_property = dm_atomic_plane_set_property, 1788 .atomic_get_property = dm_atomic_plane_get_property, 1789 #endif 1790 }; 1791 1792 int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, 1793 struct drm_plane *plane, 1794 unsigned long possible_crtcs, 1795 const struct dc_plane_cap *plane_cap) 1796 { 1797 uint32_t formats[32]; 1798 int num_formats; 1799 int res = -EPERM; 1800 unsigned int supported_rotations; 1801 uint64_t *modifiers = NULL; 1802 unsigned int primary_zpos = dm->dc->caps.max_slave_planes; 1803 1804 num_formats = amdgpu_dm_plane_get_plane_formats(plane, plane_cap, formats, 1805 ARRAY_SIZE(formats)); 1806 1807 res = amdgpu_dm_plane_get_plane_modifiers(dm->adev, plane->type, &modifiers); 1808 if (res) 1809 return res; 1810 1811 if (modifiers == NULL) 1812 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true; 1813 1814 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs, 1815 &dm_plane_funcs, formats, num_formats, 1816 modifiers, plane->type, NULL); 1817 kfree(modifiers); 1818 if (res) 1819 return res; 1820 1821 if (plane->type == DRM_PLANE_TYPE_OVERLAY && 1822 plane_cap && plane_cap->per_pixel_alpha) { 1823 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) | 1824 BIT(DRM_MODE_BLEND_PREMULTI) | 1825 BIT(DRM_MODE_BLEND_COVERAGE); 1826 1827 drm_plane_create_alpha_property(plane); 1828 drm_plane_create_blend_mode_property(plane, blend_caps); 1829 } 1830 1831 if (plane->type == DRM_PLANE_TYPE_PRIMARY) { 1832 /* 1833 * Allow OVERLAY planes to be used as underlays by assigning an 1834 * immutable zpos = # of OVERLAY planes to the PRIMARY plane. 1835 */ 1836 drm_plane_create_zpos_immutable_property(plane, primary_zpos); 1837 } else if (plane->type == DRM_PLANE_TYPE_OVERLAY) { 1838 /* 1839 * OVERLAY planes can be below or above the PRIMARY, but cannot 1840 * be above the CURSOR plane. 1841 */ 1842 unsigned int zpos = primary_zpos + 1 + drm_plane_index(plane); 1843 1844 drm_plane_create_zpos_property(plane, zpos, 0, 254); 1845 } else if (plane->type == DRM_PLANE_TYPE_CURSOR) { 1846 drm_plane_create_zpos_immutable_property(plane, 255); 1847 } 1848 1849 if (plane->type == DRM_PLANE_TYPE_PRIMARY && 1850 plane_cap && 1851 (plane_cap->pixel_format_support.nv12 || 1852 plane_cap->pixel_format_support.p010)) { 1853 /* This only affects YUV formats. */ 1854 drm_plane_create_color_properties( 1855 plane, 1856 BIT(DRM_COLOR_YCBCR_BT601) | 1857 BIT(DRM_COLOR_YCBCR_BT709) | 1858 BIT(DRM_COLOR_YCBCR_BT2020), 1859 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | 1860 BIT(DRM_COLOR_YCBCR_FULL_RANGE), 1861 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE); 1862 } 1863 1864 supported_rotations = 1865 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | 1866 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270; 1867 1868 if (dm->adev->asic_type >= CHIP_BONAIRE && 1869 plane->type != DRM_PLANE_TYPE_CURSOR) 1870 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, 1871 supported_rotations); 1872 1873 if (amdgpu_ip_version(dm->adev, DCE_HWIP, 0) > IP_VERSION(3, 0, 1) && 1874 plane->type != DRM_PLANE_TYPE_CURSOR) 1875 drm_plane_enable_fb_damage_clips(plane); 1876 1877 if (plane->type == DRM_PLANE_TYPE_PRIMARY) 1878 drm_plane_helper_add(plane, &dm_primary_plane_helper_funcs); 1879 else 1880 drm_plane_helper_add(plane, &dm_plane_helper_funcs); 1881 1882 #ifdef AMD_PRIVATE_COLOR 1883 dm_atomic_plane_attach_color_mgmt_properties(dm, plane); 1884 #endif 1885 /* Create (reset) the plane state */ 1886 if (plane->funcs->reset) 1887 plane->funcs->reset(plane); 1888 1889 return 0; 1890 } 1891 1892 bool amdgpu_dm_plane_is_video_format(uint32_t format) 1893 { 1894 int i; 1895 1896 for (i = 0; i < ARRAY_SIZE(video_formats); i++) 1897 if (format == video_formats[i]) 1898 return true; 1899 1900 return false; 1901 } 1902 1903