// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_fourcc.h>

#include "amdgpu.h"
#include "dal_asic_id.h"
#include "amdgpu_display.h"
#include "amdgpu_dm_trace.h"
#include "amdgpu_dm_plane.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"

/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so internal drm
 * check will succeed, and let DC implement proper check
 */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XRGB16161616,
	DRM_FORMAT_XBGR16161616,
	DRM_FORMAT_ARGB16161616,
	DRM_FORMAT_ABGR16161616,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565,
};

static const uint32_t overlay_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_NV21,
	DRM_FORMAT_NV12,
	DRM_FORMAT_P010
};

static const uint32_t video_formats[] = {
	DRM_FORMAT_NV21,
	DRM_FORMAT_NV12,
	DRM_FORMAT_P010
};

static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};

enum dm_micro_swizzle {
	MICRO_SWIZZLE_Z = 0,
	MICRO_SWIZZLE_S = 1,
	MICRO_SWIZZLE_D = 2,
	MICRO_SWIZZLE_R = 3
};

const struct drm_format_info *amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
}

void fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
				    bool *per_pixel_alpha, bool *pre_multiplied_alpha,
				    bool *global_alpha, int *global_alpha_value)
{
	*per_pixel_alpha = false;
	*pre_multiplied_alpha = true;
	*global_alpha = false;
	*global_alpha_value = 0xff;

	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
		return;

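	/*
	 * Per-pixel alpha only applies to blend modes that consume the alpha
	 * channel, and only when the framebuffer format actually carries one.
	 */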
	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
	    plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
		static const uint32_t alpha_formats[] = {
			DRM_FORMAT_ARGB8888,
			DRM_FORMAT_RGBA8888,
			DRM_FORMAT_ABGR8888,
		};
		uint32_t format = plane_state->fb->format->format;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
			if (format == alpha_formats[i]) {
				*per_pixel_alpha = true;
				break;
			}
		}

		if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
			*pre_multiplied_alpha = false;
	}

	if (plane_state->alpha < 0xffff) {
		*global_alpha = true;
		*global_alpha_value = plane_state->alpha >> 8;
	}
}

static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
{
	if (!*mods)
		return;

	if (*cap - *size < 1) {
		uint64_t new_cap = *cap * 2;
		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);

		if (!new_mods) {
			kfree(*mods);
			*mods = NULL;
			return;
		}

		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
		kfree(*mods);
		*mods = new_mods;
		*cap = new_cap;
	}

	(*mods)[*size] = mod;
	*size += 1;
}

static bool modifier_has_dcc(uint64_t modifier)
{
	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}

static unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier)
{
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return 0;

	return AMD_FMT_MOD_GET(TILE, modifier);
}

static void fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
					     uint64_t tiling_flags)
{
	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode = DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode = DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
		AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
}

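/*
 * On GFX9 and newer the tiling layout is no longer carried in per-BO flags;
 * derive the baseline parameters from the device's GB_ADDR_CONFIG instead.
 */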
static void fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
					      union dc_tiling_info *tiling_info)
{
	/* Fill GFX9 params */
	tiling_info->gfx9.num_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;
	tiling_info->gfx9.num_banks =
		adev->gfx.config.gb_addr_config_fields.num_banks;
	tiling_info->gfx9.pipe_interleave =
		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
	tiling_info->gfx9.num_shader_engines =
		adev->gfx.config.gb_addr_config_fields.num_se;
	tiling_info->gfx9.max_compressed_frags =
		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
	tiling_info->gfx9.num_rb_per_se =
		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
	tiling_info->gfx9.shaderEnable = 1;
	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}

static void fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
						union dc_tiling_info *tiling_info,
						uint64_t modifier)
{
	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
	unsigned int pipes_log2;

	pipes_log2 = min(5u, mod_pipe_xor_bits);

	fill_gfx9_tiling_info_from_device(adev, tiling_info);

	if (!IS_AMD_FMT_MOD(modifier))
		return;

	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);

	if (adev->family >= AMDGPU_FAMILY_NV) {
		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
	} else {
		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;

		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
	}
}

static int validate_dcc(struct amdgpu_device *adev,
			const enum surface_pixel_format format,
			const enum dc_rotation_angle rotation,
			const union dc_tiling_info *tiling_info,
			const struct dc_plane_dcc_param *dcc,
			const struct dc_plane_address *address,
			const struct plane_size *plane_size)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (!dcc->enable)
		return 0;

	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
	    !dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	if (dcc->independent_64b_blks == 0 &&
	    output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	return 0;
}

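/*
 * Translate the framebuffer's format modifier into DC tiling and DCC
 * parameters. DCC metadata, when present, lives in plane 1 of the
 * framebuffer (offsets[1]/pitches[1]).
 */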
static int fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
						     const struct amdgpu_framebuffer *afb,
						     const enum surface_pixel_format format,
						     const enum dc_rotation_angle rotation,
						     const struct plane_size *plane_size,
						     union dc_tiling_info *tiling_info,
						     struct dc_plane_dcc_param *dcc,
						     struct dc_plane_address *address,
						     const bool force_disable_dcc)
{
	const uint64_t modifier = afb->base.modifier;
	int ret = 0;

	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);

	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
		uint64_t dcc_address = afb->address + afb->base.offsets[1];
		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);

		dcc->enable = 1;
		dcc->meta_pitch = afb->base.pitches[1];
		dcc->independent_64b_blks = independent_64b_blks;
		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
			if (independent_64b_blks && independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
			else if (independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_128b;
			else if (independent_64b_blks && !independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b;
			else
				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
		} else {
			if (independent_64b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b;
			else
				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
		}

		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
	}

	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
	if (ret)
		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);

	return ret;
}

static void add_gfx10_1_modifiers(const struct amdgpu_device *adev,
				  uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

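/*
 * GFX9 (Vega/Raven) modifier list. Entries are appended most-preferred
 * first: DCC-capable swizzles, then plain tiled swizzles; the caller
 * appends LINEAR and the INVALID terminator.
 */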
static void add_gfx9_modifiers(const struct amdgpu_device *adev,
			       uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pipe_xor_bits = min(8, pipes +
				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
	int bank_xor_bits = min(8 - pipe_xor_bits,
				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);

	if (adev->family == AMDGPU_FAMILY_RV) {
		/* Raven2 and later */
		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;

		/*
		 * No _D DCC swizzles yet because we only allow 32bpp, which
		 * doesn't support _D on DCN
		 */

		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				     AMD_FMT_MOD_SET(DCC, 1) |
				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			     AMD_FMT_MOD_SET(DCC, 1) |
			     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));

		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				     AMD_FMT_MOD_SET(DCC, 1) |
				     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
				     AMD_FMT_MOD_SET(RB, rb) |
				     AMD_FMT_MOD_SET(PIPE, pipes));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			     AMD_FMT_MOD_SET(DCC, 1) |
			     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
			     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
			     AMD_FMT_MOD_SET(RB, rb) |
			     AMD_FMT_MOD_SET(PIPE, pipes));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
			     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
	}
}

static void add_gfx10_3_modifiers(const struct amdgpu_device *adev,
				  uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

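/*
 * GFX11 modifier list. Unlike the GFX9/GFX10 variants above, the pipe and
 * packer counts are read straight from GB_ADDR_CONFIG (see the TODO below),
 * and both the 64K and 256K R_X swizzles are published.
 */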
static void add_gfx11_modifiers(struct amdgpu_device *adev,
				uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int num_pipes = 0;
	int pipe_xor_bits = 0;
	int num_pkrs = 0;
	int pkrs = 0;
	u32 gb_addr_config;
	u8 i = 0;
	unsigned int swizzle_r_x;
	uint64_t modifier_r_x;
	uint64_t modifier_dcc_best;
	uint64_t modifier_dcc_4k;

	/* TODO: GFX11 IP HW init hasn't finished yet and we get zero if we
	 * read from adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes}
	 */
	gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
	ASSERT(gb_addr_config != 0);

	num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
	pkrs = ilog2(num_pkrs);
	num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
	pipe_xor_bits = ilog2(num_pipes);

	for (i = 0; i < 2; i++) {
		/* Insert the best one first. */
		/* R_X swizzle modes are the best for rendering and DCC requires them. */
		if (num_pipes > 16)
			swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
		else
			swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;

		modifier_r_x = AMD_FMT_MOD |
			       AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
			       AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			       AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
			       AMD_FMT_MOD_SET(PACKERS, pkrs);

		/* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
		modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);

		/* DCC settings for 4K and greater resolutions. (required by display hw) */
		modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
				  AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				  AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
				  AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);

		add_modifier(mods, size, capacity, modifier_dcc_best);
		add_modifier(mods, size, capacity, modifier_dcc_4k);

		add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
		add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));

		add_modifier(mods, size, capacity, modifier_r_x);
	}

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
}

static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
{
	uint64_t size = 0, capacity = 128;

	*mods = NULL;

	/* We have not hooked up any pre-GFX9 modifiers. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return 0;

	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);

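	/*
	 * The cursor plane only advertises LINEAR (plus the INVALID list
	 * terminator); none of the tiled swizzles below apply to it.
	 */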
	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
		return *mods ? 0 : -ENOMEM;
	}

	switch (adev->family) {
	case AMDGPU_FAMILY_AI:
	case AMDGPU_FAMILY_RV:
		add_gfx9_modifiers(adev, mods, &size, &capacity);
		break;
	case AMDGPU_FAMILY_NV:
	case AMDGPU_FAMILY_VGH:
	case AMDGPU_FAMILY_YC:
	case AMDGPU_FAMILY_GC_10_3_6:
	case AMDGPU_FAMILY_GC_10_3_7:
		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
		else
			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
		break;
	case AMDGPU_FAMILY_GC_11_0_0:
	case AMDGPU_FAMILY_GC_11_0_1:
		add_gfx11_modifiers(adev, mods, &size, &capacity);
		break;
	}

	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);

	/* INVALID marks the end of the list. */
	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);

	if (!*mods)
		return -ENOMEM;

	return 0;
}

static int get_plane_formats(const struct drm_plane *plane,
			     const struct dc_plane_cap *plane_cap,
			     uint32_t *formats, int max_formats)
{
	int i, num_formats = 0;

	/*
	 * TODO: Query support for each group of formats directly from
	 * DC plane caps. This will require adding more formats to the
	 * caps list.
	 */

	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = rgb_formats[i];
		}

		if (plane_cap && plane_cap->pixel_format_support.nv12)
			formats[num_formats++] = DRM_FORMAT_NV12;
		if (plane_cap && plane_cap->pixel_format_support.p010)
			formats[num_formats++] = DRM_FORMAT_P010;
		if (plane_cap && plane_cap->pixel_format_support.fp16) {
			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
		}
		break;

	case DRM_PLANE_TYPE_OVERLAY:
		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = overlay_formats[i];
		}
		break;

	case DRM_PLANE_TYPE_CURSOR:
		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = cursor_formats[i];
		}
		break;
	}

	return num_formats;
}

#ifdef CONFIG_DRM_AMD_DC_HDR
static int attach_color_mgmt_properties(struct amdgpu_display_manager *dm, struct drm_plane *plane)
{
	drm_object_attach_property(&plane->base,
				   dm->degamma_lut_property,
				   0);
	drm_object_attach_property(&plane->base,
				   dm->degamma_lut_size_property,
				   MAX_COLOR_LUT_ENTRIES);
	drm_object_attach_property(&plane->base, dm->ctm_property,
				   0);
	drm_object_attach_property(&plane->base, dm->sdr_boost_property,
				   DEFAULT_SDR_BOOST);

	return 0;
}
#endif

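/*
 * Fill DC's plane size, tiling, DCC and address parameters for a
 * framebuffer. Used by dm_plane_helper_prepare_fb() below, among other
 * callers, once the buffer is pinned and its GPU address is known.
 */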
int fill_plane_buffer_attributes(struct amdgpu_device *adev,
				 const struct amdgpu_framebuffer *afb,
				 const enum surface_pixel_format format,
				 const enum dc_rotation_angle rotation,
				 const uint64_t tiling_flags,
				 union dc_tiling_info *tiling_info,
				 struct plane_size *plane_size,
				 struct dc_plane_dcc_param *dcc,
				 struct dc_plane_address *address,
				 bool tmz_surface,
				 bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		uint64_t addr = afb->address + fb->offsets[0];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(addr);
		address->grph.addr.high_part = upper_32_bits(addr);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		uint64_t luma_addr = afb->address + fb->offsets[0];
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(luma_addr);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(luma_addr);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	if (adev->family >= AMDGPU_FAMILY_AI) {
		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
								rotation, plane_size,
								tiling_info, dcc,
								address,
								force_disable_dcc);
		if (ret)
			return ret;
	} else {
		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
	}

	return 0;
}

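/*
 * prepare_fb: pin the buffer backing the new framebuffer (VRAM, or GTT
 * where scanout from GTT is supported), hook up the implicit fence, and
 * pre-fill buffer attributes for planes DC is not using yet.
 */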
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	uint32_t domain;
	int r;

	if (!new_state->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);

	r = amdgpu_bo_reserve(rbo, true);
	if (r) {
		dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
		return r;
	}

	r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
	if (r) {
		dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
		goto error_unlock;
	}

	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		goto error_unlock;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		DRM_ERROR("%p bind failed\n", rbo);
		goto error_unpin;
	}

	r = drm_gem_plane_helper_prepare_fb(plane, new_state);
	if (unlikely(r != 0))
		goto error_unpin;

	amdgpu_bo_unreserve(rbo);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	amdgpu_bo_ref(rbo);

	/*
	 * We don't do surface updates on planes that have been newly created,
	 * but we also don't have the afb->address during atomic check.
	 *
	 * Fill in buffer attributes depending on the address here, but only on
	 * newly created planes since they're not being used by DC yet and this
	 * won't modify global state.
	 */
	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (dm_plane_state_new->dc_state &&
	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state =
			dm_plane_state_new->dc_state;
		bool force_disable_dcc = !plane_state->dcc.enable;

		fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			afb->tiling_flags,
			&plane_state->tiling_info, &plane_state->plane_size,
			&plane_state->dcc, &plane_state->address,
			afb->tmz_surface, force_disable_dcc);
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(rbo);

error_unlock:
	amdgpu_bo_unreserve(rbo);
	return r;
}

static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
				       struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!old_state->fb)
		return;

	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}

static void get_min_max_dc_plane_scaling(struct drm_device *dev,
					 struct drm_framebuffer *fb,
					 int *min_downscale, int *max_upscale)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];

	switch (fb->format->format) {
	case DRM_FORMAT_P010:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		*max_upscale = plane_cap->max_upscale_factor.nv12;
		*min_downscale = plane_cap->max_downscale_factor.nv12;
		break;

	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		*max_upscale = plane_cap->max_upscale_factor.fp16;
		*min_downscale = plane_cap->max_downscale_factor.fp16;
		break;

	default:
		*max_upscale = plane_cap->max_upscale_factor.argb8888;
		*min_downscale = plane_cap->max_downscale_factor.argb8888;
		break;
	}

	/*
	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use
	 * a scaling factor of 1.0 == 1000 units.
	 */
	if (*max_upscale == 1)
		*max_upscale = 1000;

	if (*min_downscale == 1)
		*min_downscale = 1000;
}

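/*
 * Validate plane position and scaling against the CRTC mode and DC's
 * per-format scaling caps, converting DC's 1.0 == 1000 factors into the
 * 16.16 fixed-point min/max scale DRM expects.
 */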
int dm_plane_helper_check_state(struct drm_plane_state *state,
				struct drm_crtc_state *new_crtc_state)
{
	struct drm_framebuffer *fb = state->fb;
	int min_downscale, max_upscale;
	int min_scale = 0;
	int max_scale = INT_MAX;

	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
	if (fb && state->crtc) {
		/* Validate viewport to cover the case when only the position changes */
		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
			int viewport_width = state->crtc_w;
			int viewport_height = state->crtc_h;

			if (state->crtc_x < 0)
				viewport_width += state->crtc_x;
			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;

			if (state->crtc_y < 0)
				viewport_height += state->crtc_y;
			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;

			if (viewport_width < 0 || viewport_height < 0) {
				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
				return -EINVAL;
			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
				return -EINVAL;
			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
				return -EINVAL;
			}
		}

		/* Get min/max allowed scaling factors from plane caps. */
		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
					     &min_downscale, &max_upscale);
		/*
		 * Convert to drm convention: 16.16 fixed point, instead of dc's
		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
		 */
		min_scale = (1000 << 16) / max_upscale;
		max_scale = (1000 << 16) / min_downscale;
	}

	return drm_atomic_helper_check_plane_state(
		state, new_crtc_state, min_scale, max_scale, true, true);
}

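/*
 * Convert the DRM plane state's src/dst rectangles into DC's scaling info,
 * rejecting degenerate rectangles and ratios outside the per-format caps.
 */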
int fill_dc_scaling_info(struct amdgpu_device *adev,
			 const struct drm_plane_state *state,
			 struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h, min_downscale, max_upscale;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	/*
	 * For reasons we don't (yet) fully understand a non-zero
	 * src_y coordinate into an NV12 buffer can cause a
	 * system hang on DCN1x.
	 * To avoid hangs (and maybe be overly cautious)
	 * let's reject both non-zero src_x and src_y.
	 *
	 * We currently know of only one use-case to reproduce a
	 * scenario with non-zero src_x and src_y for NV12, which
	 * is to gesture the YouTube Android app into full screen
	 * on ChromeOS.
	 */
	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
	     (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
	     (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
		return -EINVAL;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* Validate scaling per-format with DC plane caps */
	if (state->plane && state->plane->dev && state->fb) {
		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
					     &min_downscale, &max_upscale);
	} else {
		min_downscale = 250;
		max_upscale = 16000;
	}

	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < min_downscale || scale_w > max_upscale)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < min_downscale || scale_h > max_upscale)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}

static int dm_plane_atomic_check(struct drm_plane *plane,
				 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_plane_state *dm_plane_state;
	struct dc_scaling_info scaling_info;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	trace_amdgpu_dm_plane_atomic_check(new_plane_state);

	dm_plane_state = to_dm_plane_state(new_plane_state);

	if (!dm_plane_state->dc_state)
		return 0;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state,
					      new_plane_state->crtc);
	if (!new_crtc_state)
		return -EINVAL;

	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
	if (ret)
		return ret;

	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
	if (ret)
		return ret;

	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
		return 0;

	return -EINVAL;
}

static int dm_plane_atomic_async_check(struct drm_plane *plane,
				       struct drm_atomic_state *state)
{
	/* Only support async updates on cursor planes. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		return -EINVAL;

	return 0;
}

static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int x, y;
	int xorigin = 0, yorigin = 0;

	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
	position->enable = true;
	position->translate_by_source = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}

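/*
 * Program the cursor for the stream on the plane's CRTC. A cursor that is
 * disabled or entirely off screen is turned off via a position update with
 * enable == false.
 */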
void handle_cursor_update(struct drm_plane *plane,
			  struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position = {0};
	struct dc_cursor_attributes attributes;
	int ret;

	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
		      __func__,
		      amdgpu_crtc->crtc_id,
		      plane->state->crtc_w,
		      plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part = lower_32_bits(address);
	attributes.width = plane->state->crtc_w;
	attributes.height = plane->state->crtc_h;
	attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
						     &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}

static void dm_plane_atomic_async_update(struct drm_plane *plane,
					 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(state, plane);

	trace_amdgpu_dm_atomic_update_cursor(new_state);

	swap(plane->state->fb, new_state->fb);

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;

	handle_cursor_update(plane, old_state);
}

static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};

static void dm_drm_plane_reset(struct drm_plane *plane)
{
	struct dm_plane_state *amdgpu_state = NULL;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);

	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
	WARN_ON(amdgpu_state == NULL);

	if (amdgpu_state)
		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
#ifdef CONFIG_DRM_AMD_DC_HDR
	if (amdgpu_state)
		amdgpu_state->sdr_boost = DEFAULT_SDR_BOOST;
#endif
}

static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane *plane)
{
	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;

	old_dm_plane_state = to_dm_plane_state(plane->state);
	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
	if (!dm_plane_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);

	if (old_dm_plane_state->dc_state) {
		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
		dc_plane_state_retain(dm_plane_state->dc_state);
	}

#ifdef CONFIG_DRM_AMD_DC_HDR
	if (dm_plane_state->degamma_lut)
		drm_property_blob_get(dm_plane_state->degamma_lut);
	if (dm_plane_state->ctm)
		drm_property_blob_get(dm_plane_state->ctm);

	dm_plane_state->sdr_boost = old_dm_plane_state->sdr_boost;
#endif

	return &dm_plane_state->base;
}

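/*
 * Check a format/modifier pair against the modifier list published for this
 * plane, plus the bpp restrictions the modifier list itself cannot express
 * (D swizzles and DCC are only valid for certain cpp values).
 */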
static bool dm_plane_format_mod_supported(struct drm_plane *plane,
					  uint32_t format,
					  uint64_t modifier)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	const struct drm_format_info *info = drm_format_info(format);
	int i;

	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;

	if (!info)
		return false;

	/*
	 * We always have to allow these modifiers:
	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
	 */
	if (modifier == DRM_FORMAT_MOD_LINEAR ||
	    modifier == DRM_FORMAT_MOD_INVALID) {
		return true;
	}

	/* Check that the modifier is on the list of the plane's supported modifiers. */
	for (i = 0; i < plane->modifier_count; i++) {
		if (modifier == plane->modifiers[i])
			break;
	}
	if (i == plane->modifier_count)
		return false;

	/*
	 * For D swizzle the canonical modifier depends on the bpp, so check
	 * it here.
	 */
	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
	    adev->family >= AMDGPU_FAMILY_NV) {
		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
			return false;
	}

	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
	    info->cpp[0] < 8)
		return false;

	if (modifier_has_dcc(modifier)) {
		/* Per radeonsi comments 16/64 bpp are more complicated. */
		if (info->cpp[0] != 4)
			return false;
		/*
		 * We support multi-planar formats, but not when combined with
		 * additional DCC metadata planes.
		 */
		if (info->num_planes > 1)
			return false;
	}

	return true;
}

static void dm_drm_plane_destroy_state(struct drm_plane *plane,
				       struct drm_plane_state *state)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);

#ifdef CONFIG_DRM_AMD_DC_HDR
	drm_property_blob_put(dm_plane_state->degamma_lut);
	drm_property_blob_put(dm_plane_state->ctm);
#endif
	if (dm_plane_state->dc_state)
		dc_plane_state_release(dm_plane_state->dc_state);

	drm_atomic_helper_plane_destroy_state(plane, state);
}

#ifdef CONFIG_DRM_AMD_DC_HDR
/* copied from drm_atomic_uapi.c */
static int atomic_replace_property_blob_from_id(struct drm_device *dev,
						struct drm_property_blob **blob,
						uint64_t blob_id,
						ssize_t expected_size,
						ssize_t expected_elem_size,
						bool *replaced)
{
	struct drm_property_blob *new_blob = NULL;

	if (blob_id != 0) {
		new_blob = drm_property_lookup_blob(dev, blob_id);
		if (new_blob == NULL)
			return -EINVAL;

		if (expected_size > 0 &&
		    new_blob->length != expected_size) {
			drm_property_blob_put(new_blob);
			return -EINVAL;
		}
		if (expected_elem_size > 0 &&
		    new_blob->length % expected_elem_size != 0) {
			drm_property_blob_put(new_blob);
			return -EINVAL;
		}
	}

	*replaced |= drm_property_replace_blob(blob, new_blob);
	drm_property_blob_put(new_blob);

	return 0;
}

int dm_drm_plane_set_property(struct drm_plane *plane,
			      struct drm_plane_state *state,
			      struct drm_property *property,
			      uint64_t val)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
	int ret = 0;
	bool replaced;

	if (property == adev->dm.degamma_lut_property) {
		ret = atomic_replace_property_blob_from_id(adev_to_drm(adev),
							   &dm_plane_state->degamma_lut,
							   val, -1, sizeof(struct drm_color_lut),
							   &replaced);
	} else if (property == adev->dm.ctm_property) {
		ret = atomic_replace_property_blob_from_id(adev_to_drm(adev),
							   &dm_plane_state->ctm,
							   val,
							   sizeof(struct drm_color_ctm), -1,
							   &replaced);
	} else if (property == adev->dm.sdr_boost_property) {
		dm_plane_state->sdr_boost = val;
	} else {
		return -EINVAL;
	}

	return ret;
}

int dm_drm_plane_get_property(struct drm_plane *plane,
			      const struct drm_plane_state *state,
			      struct drm_property *property,
			      uint64_t *val)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
	struct amdgpu_device *adev = drm_to_adev(plane->dev);

	if (property == adev->dm.degamma_lut_property) {
		*val = (dm_plane_state->degamma_lut) ?
			dm_plane_state->degamma_lut->base.id : 0;
	} else if (property == adev->dm.ctm_property) {
		*val = (dm_plane_state->ctm) ?
			dm_plane_state->ctm->base.id : 0;
	} else if (property == adev->dm.sdr_boost_property) {
		*val = dm_plane_state->sdr_boost;
	} else {
		return -EINVAL;
	}

	return 0;
}
#endif

static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_plane_helper_destroy,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
	.format_mod_supported = dm_plane_format_mod_supported,
#ifdef CONFIG_DRM_AMD_DC_HDR
	.atomic_set_property = dm_drm_plane_set_property,
	.atomic_get_property = dm_drm_plane_get_property,
#endif
};

int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
			 struct drm_plane *plane,
			 unsigned long possible_crtcs,
			 const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res = -EPERM;
	unsigned int supported_rotations;
	uint64_t *modifiers = NULL;

	num_formats = get_plane_formats(plane, plane_cap, formats,
					ARRAY_SIZE(formats));

	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
	if (res)
		return res;

	if (modifiers == NULL)
		adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;

	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       modifiers, plane->type, NULL);
	kfree(modifiers);
	if (res)
		return res;

	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    plane_cap && plane_cap->per_pixel_alpha) {
		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					  BIT(DRM_MODE_BLEND_PREMULTI) |
					  BIT(DRM_MODE_BLEND_COVERAGE);

		drm_plane_create_alpha_property(plane);
		drm_plane_create_blend_mode_property(plane, blend_caps);
	}

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    plane_cap &&
	    (plane_cap->pixel_format_support.nv12 ||
	     plane_cap->pixel_format_support.p010)) {
		/* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709) |
			BIT(DRM_COLOR_YCBCR_BT2020),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	supported_rotations =
		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;

	if (dm->adev->asic_type >= CHIP_BONAIRE &&
	    plane->type != DRM_PLANE_TYPE_CURSOR)
		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
						   supported_rotations);

	if (dm->adev->ip_versions[DCE_HWIP][0] > IP_VERSION(3, 0, 1) &&
	    plane->type != DRM_PLANE_TYPE_CURSOR)
		drm_plane_enable_fb_damage_clips(plane);

	drm_plane_helper_add(plane, &dm_plane_helper_funcs);

#ifdef CONFIG_DRM_AMD_DC_HDR
	attach_color_mgmt_properties(dm, plane);
#endif
	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}

bool is_video_format(uint32_t format)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(video_formats); i++)
		if (format == video_formats[i])
			return true;

	return false;
}