1 /* 2 * Copyright 2008 Advanced Micro Devices, Inc. 3 * Copyright 2008 Red Hat Inc. 4 * Copyright 2009 Jerome Glisse. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 
23 * 24 * Authors: Dave Airlie 25 * Alex Deucher 26 * Jerome Glisse 27 */ 28 #ifndef __AMDGPU_H__ 29 #define __AMDGPU_H__ 30 31 #include <linux/atomic.h> 32 #include <linux/wait.h> 33 #include <linux/list.h> 34 #include <linux/kref.h> 35 #include <linux/interval_tree.h> 36 #include <linux/hashtable.h> 37 #include <linux/fence.h> 38 39 #include <ttm/ttm_bo_api.h> 40 #include <ttm/ttm_bo_driver.h> 41 #include <ttm/ttm_placement.h> 42 #include <ttm/ttm_module.h> 43 #include <ttm/ttm_execbuf_util.h> 44 45 #include <drm/drmP.h> 46 #include <drm/drm_gem.h> 47 #include <drm/amdgpu_drm.h> 48 49 #include "amd_shared.h" 50 #include "amdgpu_mode.h" 51 #include "amdgpu_ih.h" 52 #include "amdgpu_irq.h" 53 #include "amdgpu_ucode.h" 54 #include "amdgpu_gds.h" 55 #include "amd_powerplay.h" 56 #include "amdgpu_acp.h" 57 58 #include "gpu_scheduler.h" 59 60 /* 61 * Modules parameters. 62 */ 63 extern int amdgpu_modeset; 64 extern int amdgpu_vram_limit; 65 extern int amdgpu_gart_size; 66 extern int amdgpu_benchmarking; 67 extern int amdgpu_testing; 68 extern int amdgpu_audio; 69 extern int amdgpu_disp_priority; 70 extern int amdgpu_hw_i2c; 71 extern int amdgpu_pcie_gen2; 72 extern int amdgpu_msi; 73 extern int amdgpu_lockup_timeout; 74 extern int amdgpu_dpm; 75 extern int amdgpu_smc_load_fw; 76 extern int amdgpu_aspm; 77 extern int amdgpu_runtime_pm; 78 extern unsigned amdgpu_ip_block_mask; 79 extern int amdgpu_bapm; 80 extern int amdgpu_deep_color; 81 extern int amdgpu_vm_size; 82 extern int amdgpu_vm_block_size; 83 extern int amdgpu_vm_fault_stop; 84 extern int amdgpu_vm_debug; 85 extern int amdgpu_sched_jobs; 86 extern int amdgpu_sched_hw_submission; 87 extern int amdgpu_powerplay; 88 extern int amdgpu_powercontainment; 89 extern unsigned amdgpu_pcie_gen_cap; 90 extern unsigned amdgpu_pcie_lane_cap; 91 extern unsigned amdgpu_cg_mask; 92 extern unsigned amdgpu_pg_mask; 93 94 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 95 #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 96 #define 
AMDGPU_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
#define AMDGPU_IB_POOL_SIZE			16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS		32
#define AMDGPUFB_CONN_LIMIT			4
#define AMDGPU_BIOS_NUM_SCRATCH			8

/* max number of rings */
#define AMDGPU_MAX_RINGS			16
#define AMDGPU_MAX_GFX_RINGS			1
#define AMDGPU_MAX_COMPUTE_RINGS		8
#define AMDGPU_MAX_VCE_RINGS			2

/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES		2

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE			(8 << 20)

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA			0x39d5e86b

/* reset flags */
#define AMDGPU_RESET_GFX			(1 << 0)
#define AMDGPU_RESET_COMPUTE			(1 << 1)
#define AMDGPU_RESET_DMA			(1 << 2)
#define AMDGPU_RESET_CP				(1 << 3)
#define AMDGPU_RESET_GRBM			(1 << 4)
#define AMDGPU_RESET_DMA1			(1 << 5)
#define AMDGPU_RESET_RLC			(1 << 6)
#define AMDGPU_RESET_SEM			(1 << 7)
#define AMDGPU_RESET_IH				(1 << 8)
#define AMDGPU_RESET_VMC			(1 << 9)
#define AMDGPU_RESET_MC				(1 << 10)
#define AMDGPU_RESET_DISPLAY			(1 << 11)
#define AMDGPU_RESET_UVD			(1 << 12)
#define AMDGPU_RESET_VCE			(1 << 13)
#define AMDGPU_RESET_VCE1			(1 << 14)

/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE			0x00000000L
#define AMDGPU_GFX_SAFE_MODE			0x00000001L
#define AMDGPU_GFX_PG_DISABLED_MODE		0x00000002L
#define AMDGPU_GFX_CG_DISABLED_MODE		0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE		0x00000008L

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH			128
#define CIK_CURSOR_HEIGHT			128

/* forward declarations for types used throughout this header */
struct amdgpu_device;
struct amdgpu_ib;
struct amdgpu_vm;
struct amdgpu_ring;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;

/* CP (command processor) end-of-pipe interrupt sources: one for the
 * GFX ring plus one per compute pipe (MEC1/MEC2, four pipes each) */
enum amdgpu_cp_irq {
	AMDGPU_CP_IRQ_GFX_EOP = 0,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,

	AMDGPU_CP_IRQ_LAST
};

/* SDMA trap interrupt sources, one per SDMA instance
 * (see AMDGPU_MAX_SDMA_INSTANCES above) */
enum amdgpu_sdma_irq {
	AMDGPU_SDMA_IRQ_TRAP0 = 0,
	AMDGPU_SDMA_IRQ_TRAP1,

	AMDGPU_SDMA_IRQ_LAST
};

/* thermal threshold-crossing interrupt sources */
enum amdgpu_thermal_irq {
	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,

	AMDGPU_THERMAL_IRQ_LAST
};

/* set the clockgating/powergating state of a single IP block */
int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				  enum amd_ip_block_type block_type,
				  enum amd_clockgating_state state);
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				  enum amd_ip_block_type block_type,
				  enum amd_powergating_state state);

/* version and function table describing one hardware IP block */
struct amdgpu_ip_block_version {
	enum amd_ip_block_type type;
	u32 major;
	u32 minor;
	u32 rev;
	/* common init/fini/idle/clockgating hooks for this block */
	const struct amd_ip_funcs *funcs;
};

/* compare the installed IP block of @type against the given
 * major/minor version */
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor);

/* look up the registered IP block of the given type, or NULL --
 * NOTE(review): NULL-on-miss inferred from pointer return; confirm
 * against the implementation in amdgpu_device.c */
const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
				struct amdgpu_device *adev,
				enum amd_ip_block_type type);

/* provided by hw blocks that can move/clear data.
e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
	/* maximum bytes in a single operation */
	uint32_t	copy_max_bytes;

	/* number of dw to reserve per operation */
	unsigned	copy_num_dw;

	/* used for buffer migration */
	void (*emit_copy_buffer)(struct amdgpu_ib *ib,
				 /* src addr in bytes */
				 uint64_t src_offset,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to transfer */
				 uint32_t byte_count);

	/* maximum bytes in a single operation */
	uint32_t	fill_max_bytes;

	/* number of dw to reserve per operation */
	unsigned	fill_num_dw;

	/* used for buffer clearing */
	void (*emit_fill_buffer)(struct amdgpu_ib *ib,
				 /* value to write to memory */
				 uint32_t src_data,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to fill */
				 uint32_t byte_count);
};

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);
	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib,
			  const dma_addr_t *pages_addr, uint64_t pe,
			  uint64_t addr, unsigned count,
			  uint32_t incr, uint32_t flags);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint32_t flags);
};

/* provided by the gmc block */
struct amdgpu_gart_funcs {
	/* flush the vm tlb via mmio */
	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
			      uint32_t vmid);
	/* write pte/pde updates using the cpu */
	int (*set_pte_pde)(struct amdgpu_device *adev,
			   void *cpu_pt_addr, /* cpu addr of page table */
			   uint32_t gpu_page_idx, /* pte/pde to update */
			   uint64_t addr, /* addr to write into pte/pde */
			   uint32_t flags); /* access flags */
};

/* provided by the ih (interrupt handler) block */
struct amdgpu_ih_funcs {
	/* ring read/write ptr handling, called from interrupt context */
	u32 (*get_wptr)(struct amdgpu_device *adev);
	void (*decode_iv)(struct amdgpu_device *adev,
			  struct amdgpu_iv_entry *entry);
	void (*set_rptr)(struct amdgpu_device *adev);
};

/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
	/* ring read/write ptr handling */
	u32 (*get_rptr)(struct amdgpu_ring *ring);
	u32 (*get_wptr)(struct amdgpu_ring *ring);
	void (*set_wptr)(struct amdgpu_ring *ring);
	/* validating and patching of IBs */
	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
	/* command emit functions */
	void (*emit_ib)(struct amdgpu_ring *ring,
			struct amdgpu_ib *ib,
			unsigned vm_id, bool ctx_switch);
	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
			   uint64_t seq, unsigned flags);
	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
			      uint64_t pd_addr);
	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
	void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
				uint32_t gds_base, uint32_t gds_size,
				uint32_t gws_base, uint32_t gws_size,
				uint32_t oa_base, uint32_t oa_size);
	/* testing functions */
	int (*test_ring)(struct amdgpu_ring *ring);
	int (*test_ib)(struct amdgpu_ring *ring);
	/* insert NOP packets */
	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
	/* pad the indirect buffer to the necessary number of dw */
	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
	/* conditional execution: reserve/patch a COND_EXEC packet */
	unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
	void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
};

/*
 * BIOS.
315 */ 316 bool amdgpu_get_bios(struct amdgpu_device *adev); 317 bool amdgpu_read_bios(struct amdgpu_device *adev); 318 319 /* 320 * Dummy page 321 */ 322 struct amdgpu_dummy_page { 323 struct page *page; 324 dma_addr_t addr; 325 }; 326 int amdgpu_dummy_page_init(struct amdgpu_device *adev); 327 void amdgpu_dummy_page_fini(struct amdgpu_device *adev); 328 329 330 /* 331 * Clocks 332 */ 333 334 #define AMDGPU_MAX_PPLL 3 335 336 struct amdgpu_clock { 337 struct amdgpu_pll ppll[AMDGPU_MAX_PPLL]; 338 struct amdgpu_pll spll; 339 struct amdgpu_pll mpll; 340 /* 10 Khz units */ 341 uint32_t default_mclk; 342 uint32_t default_sclk; 343 uint32_t default_dispclk; 344 uint32_t current_dispclk; 345 uint32_t dp_extclk; 346 uint32_t max_pixel_clock; 347 }; 348 349 /* 350 * Fences. 351 */ 352 struct amdgpu_fence_driver { 353 uint64_t gpu_addr; 354 volatile uint32_t *cpu_addr; 355 /* sync_seq is protected by ring emission lock */ 356 uint32_t sync_seq; 357 atomic_t last_seq; 358 bool initialized; 359 struct amdgpu_irq_src *irq_src; 360 unsigned irq_type; 361 struct timer_list fallback_timer; 362 unsigned num_fences_mask; 363 spinlock_t lock; 364 struct fence **fences; 365 }; 366 367 /* some special values for the owner field */ 368 #define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul) 369 #define AMDGPU_FENCE_OWNER_VM ((void*)1ul) 370 371 #define AMDGPU_FENCE_FLAG_64BIT (1 << 0) 372 #define AMDGPU_FENCE_FLAG_INT (1 << 1) 373 374 int amdgpu_fence_driver_init(struct amdgpu_device *adev); 375 void amdgpu_fence_driver_fini(struct amdgpu_device *adev); 376 void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev); 377 378 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, 379 unsigned num_hw_submission); 380 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, 381 struct amdgpu_irq_src *irq_src, 382 unsigned irq_type); 383 void amdgpu_fence_driver_suspend(struct amdgpu_device *adev); 384 void amdgpu_fence_driver_resume(struct amdgpu_device *adev); 385 int 
amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence); 386 void amdgpu_fence_process(struct amdgpu_ring *ring); 387 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); 388 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); 389 390 /* 391 * TTM. 392 */ 393 394 #define AMDGPU_TTM_LRU_SIZE 20 395 396 struct amdgpu_mman_lru { 397 struct list_head *lru[TTM_NUM_MEM_TYPES]; 398 struct list_head *swap_lru; 399 }; 400 401 struct amdgpu_mman { 402 struct ttm_bo_global_ref bo_global_ref; 403 struct drm_global_reference mem_global_ref; 404 struct ttm_bo_device bdev; 405 bool mem_global_referenced; 406 bool initialized; 407 408 #if defined(CONFIG_DEBUG_FS) 409 struct dentry *vram; 410 struct dentry *gtt; 411 #endif 412 413 /* buffer handling */ 414 const struct amdgpu_buffer_funcs *buffer_funcs; 415 struct amdgpu_ring *buffer_funcs_ring; 416 /* Scheduler entity for buffer moves */ 417 struct amd_sched_entity entity; 418 419 /* custom LRU management */ 420 struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE]; 421 }; 422 423 int amdgpu_copy_buffer(struct amdgpu_ring *ring, 424 uint64_t src_offset, 425 uint64_t dst_offset, 426 uint32_t byte_count, 427 struct reservation_object *resv, 428 struct fence **fence); 429 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); 430 431 struct amdgpu_bo_list_entry { 432 struct amdgpu_bo *robj; 433 struct ttm_validate_buffer tv; 434 struct amdgpu_bo_va *bo_va; 435 uint32_t priority; 436 struct page **user_pages; 437 int user_invalidated; 438 }; 439 440 struct amdgpu_bo_va_mapping { 441 struct list_head list; 442 struct interval_tree_node it; 443 uint64_t offset; 444 uint32_t flags; 445 }; 446 447 /* bo virtual addresses in a specific vm */ 448 struct amdgpu_bo_va { 449 /* protected by bo being reserved */ 450 struct list_head bo_list; 451 struct fence *last_pt_update; 452 unsigned ref_count; 453 454 /* protected by vm mutex and spinlock */ 455 struct list_head vm_status; 456 457 /* mappings for this bo_va 
*/ 458 struct list_head invalids; 459 struct list_head valids; 460 461 /* constant after initialization */ 462 struct amdgpu_vm *vm; 463 struct amdgpu_bo *bo; 464 }; 465 466 #define AMDGPU_GEM_DOMAIN_MAX 0x3 467 468 struct amdgpu_bo { 469 /* Protected by gem.mutex */ 470 struct list_head list; 471 /* Protected by tbo.reserved */ 472 u32 prefered_domains; 473 u32 allowed_domains; 474 struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; 475 struct ttm_placement placement; 476 struct ttm_buffer_object tbo; 477 struct ttm_bo_kmap_obj kmap; 478 u64 flags; 479 unsigned pin_count; 480 void *kptr; 481 u64 tiling_flags; 482 u64 metadata_flags; 483 void *metadata; 484 u32 metadata_size; 485 /* list of all virtual address to which this bo 486 * is associated to 487 */ 488 struct list_head va; 489 /* Constant after initialization */ 490 struct amdgpu_device *adev; 491 struct drm_gem_object gem_base; 492 struct amdgpu_bo *parent; 493 494 struct ttm_bo_kmap_obj dma_buf_vmap; 495 struct amdgpu_mn *mn; 496 struct list_head mn_list; 497 }; 498 #define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base) 499 500 void amdgpu_gem_object_free(struct drm_gem_object *obj); 501 int amdgpu_gem_object_open(struct drm_gem_object *obj, 502 struct drm_file *file_priv); 503 void amdgpu_gem_object_close(struct drm_gem_object *obj, 504 struct drm_file *file_priv); 505 unsigned long amdgpu_gem_timeout(uint64_t timeout_ns); 506 struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj); 507 struct drm_gem_object * 508 amdgpu_gem_prime_import_sg_table(struct drm_device *dev, 509 struct dma_buf_attachment *attach, 510 struct sg_table *sg); 511 struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev, 512 struct drm_gem_object *gobj, 513 int flags); 514 int amdgpu_gem_prime_pin(struct drm_gem_object *obj); 515 void amdgpu_gem_prime_unpin(struct drm_gem_object *obj); 516 struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *); 517 void 
*amdgpu_gem_prime_vmap(struct drm_gem_object *obj); 518 void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); 519 int amdgpu_gem_debugfs_init(struct amdgpu_device *adev); 520 521 /* sub-allocation manager, it has to be protected by another lock. 522 * By conception this is an helper for other part of the driver 523 * like the indirect buffer or semaphore, which both have their 524 * locking. 525 * 526 * Principe is simple, we keep a list of sub allocation in offset 527 * order (first entry has offset == 0, last entry has the highest 528 * offset). 529 * 530 * When allocating new object we first check if there is room at 531 * the end total_size - (last_object_offset + last_object_size) >= 532 * alloc_size. If so we allocate new object there. 533 * 534 * When there is not enough room at the end, we start waiting for 535 * each sub object until we reach object_offset+object_size >= 536 * alloc_size, this object then become the sub object we return. 537 * 538 * Alignment can't be bigger than page size. 539 * 540 * Hole are not considered for allocation to keep things simple. 541 * Assumption is that there won't be hole (all object on same 542 * alignment). 543 */ 544 545 #define AMDGPU_SA_NUM_FENCE_LISTS 32 546 547 struct amdgpu_sa_manager { 548 wait_queue_head_t wq; 549 struct amdgpu_bo *bo; 550 struct list_head *hole; 551 struct list_head flist[AMDGPU_SA_NUM_FENCE_LISTS]; 552 struct list_head olist; 553 unsigned size; 554 uint64_t gpu_addr; 555 void *cpu_ptr; 556 uint32_t domain; 557 uint32_t align; 558 }; 559 560 /* sub-allocation buffer */ 561 struct amdgpu_sa_bo { 562 struct list_head olist; 563 struct list_head flist; 564 struct amdgpu_sa_manager *manager; 565 unsigned soffset; 566 unsigned eoffset; 567 struct fence *fence; 568 }; 569 570 /* 571 * GEM objects. 
572 */ 573 void amdgpu_gem_force_release(struct amdgpu_device *adev); 574 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, 575 int alignment, u32 initial_domain, 576 u64 flags, bool kernel, 577 struct drm_gem_object **obj); 578 579 int amdgpu_mode_dumb_create(struct drm_file *file_priv, 580 struct drm_device *dev, 581 struct drm_mode_create_dumb *args); 582 int amdgpu_mode_dumb_mmap(struct drm_file *filp, 583 struct drm_device *dev, 584 uint32_t handle, uint64_t *offset_p); 585 /* 586 * Synchronization 587 */ 588 struct amdgpu_sync { 589 DECLARE_HASHTABLE(fences, 4); 590 struct fence *last_vm_update; 591 }; 592 593 void amdgpu_sync_create(struct amdgpu_sync *sync); 594 int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, 595 struct fence *f); 596 int amdgpu_sync_resv(struct amdgpu_device *adev, 597 struct amdgpu_sync *sync, 598 struct reservation_object *resv, 599 void *owner); 600 bool amdgpu_sync_is_idle(struct amdgpu_sync *sync); 601 int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src, 602 struct fence *fence); 603 struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); 604 int amdgpu_sync_wait(struct amdgpu_sync *sync); 605 void amdgpu_sync_free(struct amdgpu_sync *sync); 606 int amdgpu_sync_init(void); 607 void amdgpu_sync_fini(void); 608 int amdgpu_fence_slab_init(void); 609 void amdgpu_fence_slab_fini(void); 610 611 /* 612 * GART structures, functions & helpers 613 */ 614 struct amdgpu_mc; 615 616 #define AMDGPU_GPU_PAGE_SIZE 4096 617 #define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1) 618 #define AMDGPU_GPU_PAGE_SHIFT 12 619 #define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK) 620 621 struct amdgpu_gart { 622 dma_addr_t table_addr; 623 struct amdgpu_bo *robj; 624 void *ptr; 625 unsigned num_gpu_pages; 626 unsigned num_cpu_pages; 627 unsigned table_size; 628 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS 629 struct page **pages; 630 #endif 631 
bool ready;
	const struct amdgpu_gart_funcs *gart_funcs;
};

int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_fini(struct amdgpu_device *adev);
void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
			int pages);
int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
		     int pages, struct page **pagelist,
		     dma_addr_t *dma_addr, uint32_t flags);

/*
 * GPU MC structures, functions & helpers
 */
struct amdgpu_mc {
	resource_size_t		aper_size;
	resource_size_t		aper_base;
	resource_size_t		agp_base;
	/* for some chips with <= 32MB we need to lie
	 * about vram size near mc fb location */
	u64			mc_vram_size;
	u64			visible_vram_size;
	u64			gtt_size;
	u64			gtt_start;
	u64			gtt_end;
	u64			vram_start;
	u64			vram_end;
	unsigned		vram_width;
	u64			real_vram_size;
	int			vram_mtrr;
	u64			gtt_base_align;
	u64			mc_mask;
	const struct firmware	*fw;	/* MC firmware */
	uint32_t		fw_version;
	struct amdgpu_irq_src	vm_fault;
	uint32_t		vram_type;
};

/*
 * GPU doorbell structures, functions & helpers
 */
typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
{
	AMDGPU_DOORBELL_KIQ			= 0x000,
	AMDGPU_DOORBELL_HIQ			= 0x001,
	AMDGPU_DOORBELL_DIQ			= 0x002,
	AMDGPU_DOORBELL_MEC_RING0		= 0x010,
	AMDGPU_DOORBELL_MEC_RING1		= 0x011,
	AMDGPU_DOORBELL_MEC_RING2		= 0x012,
	AMDGPU_DOORBELL_MEC_RING3		= 0x013,
	AMDGPU_DOORBELL_MEC_RING4		= 0x014,
	AMDGPU_DOORBELL_MEC_RING5		= 0x015,
	AMDGPU_DOORBELL_MEC_RING6		= 0x016,
	AMDGPU_DOORBELL_MEC_RING7		= 0x017,
	AMDGPU_DOORBELL_GFX_RING0		= 0x020,
	AMDGPU_DOORBELL_sDMA_ENGINE0		= 0x1E0,
	AMDGPU_DOORBELL_sDMA_ENGINE1		= 0x1E1,
	AMDGPU_DOORBELL_IH			= 0x1E8,
	AMDGPU_DOORBELL_MAX_ASSIGNMENT		= 0x3FF,
	AMDGPU_DOORBELL_INVALID			= 0xFFFF
} AMDGPU_DOORBELL_ASSIGNMENT;

struct amdgpu_doorbell {
	/* doorbell mmio */
	resource_size_t		base;
	resource_size_t		size;
	u32 __iomem		*ptr;
	u32			num_doorbells;	/* Number of doorbells actually reserved for amdgpu. */
};

/* expose the doorbell aperture reserved for the KFD (compute) driver */
void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				phys_addr_t *aperture_base,
				size_t *aperture_size,
				size_t *start_offset);

/*
 * IRQS.
 */

/* deferred work for one page flip, split into flip and BO unpin halves */
struct amdgpu_flip_work {
	struct work_struct		flip_work;
	struct work_struct		unpin_work;
	struct amdgpu_device		*adev;
	int				crtc_id;
	uint64_t			base;
	struct drm_pending_vblank_event *event;
	struct amdgpu_bo		*old_rbo;
	/* fences the flip must wait on before programming the hw */
	struct fence			*excl;
	unsigned			shared_count;
	struct fence			**shared;
	struct fence_cb			cb;
	bool				async;
};


/*
 * CP & rings.
735 */ 736 737 struct amdgpu_ib { 738 struct amdgpu_sa_bo *sa_bo; 739 uint32_t length_dw; 740 uint64_t gpu_addr; 741 uint32_t *ptr; 742 uint32_t flags; 743 }; 744 745 enum amdgpu_ring_type { 746 AMDGPU_RING_TYPE_GFX, 747 AMDGPU_RING_TYPE_COMPUTE, 748 AMDGPU_RING_TYPE_SDMA, 749 AMDGPU_RING_TYPE_UVD, 750 AMDGPU_RING_TYPE_VCE 751 }; 752 753 extern const struct amd_sched_backend_ops amdgpu_sched_ops; 754 755 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, 756 struct amdgpu_job **job, struct amdgpu_vm *vm); 757 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, 758 struct amdgpu_job **job); 759 760 void amdgpu_job_free(struct amdgpu_job *job); 761 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, 762 struct amd_sched_entity *entity, void *owner, 763 struct fence **f); 764 765 struct amdgpu_ring { 766 struct amdgpu_device *adev; 767 const struct amdgpu_ring_funcs *funcs; 768 struct amdgpu_fence_driver fence_drv; 769 struct amd_gpu_scheduler sched; 770 771 spinlock_t fence_lock; 772 struct amdgpu_bo *ring_obj; 773 volatile uint32_t *ring; 774 unsigned rptr_offs; 775 u64 next_rptr_gpu_addr; 776 volatile u32 *next_rptr_cpu_addr; 777 unsigned wptr; 778 unsigned wptr_old; 779 unsigned ring_size; 780 unsigned max_dw; 781 int count_dw; 782 uint64_t gpu_addr; 783 uint32_t align_mask; 784 uint32_t ptr_mask; 785 bool ready; 786 u32 nop; 787 u32 idx; 788 u32 me; 789 u32 pipe; 790 u32 queue; 791 struct amdgpu_bo *mqd_obj; 792 u32 doorbell_index; 793 bool use_doorbell; 794 unsigned wptr_offs; 795 unsigned next_rptr_offs; 796 unsigned fence_offs; 797 uint64_t current_ctx; 798 enum amdgpu_ring_type type; 799 char name[16]; 800 unsigned cond_exe_offs; 801 u64 cond_exe_gpu_addr; 802 volatile u32 *cond_exe_cpu_addr; 803 #if defined(CONFIG_DEBUG_FS) 804 struct dentry *ent; 805 #endif 806 }; 807 808 /* 809 * VM 810 */ 811 812 /* maximum number of VMIDs */ 813 #define AMDGPU_NUM_VM 16 814 815 /* number of entries in page table */ 
816 #define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size) 817 818 /* PTBs (Page Table Blocks) need to be aligned to 32K */ 819 #define AMDGPU_VM_PTB_ALIGN_SIZE 32768 820 #define AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1) 821 #define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK) 822 823 #define AMDGPU_PTE_VALID (1 << 0) 824 #define AMDGPU_PTE_SYSTEM (1 << 1) 825 #define AMDGPU_PTE_SNOOPED (1 << 2) 826 827 /* VI only */ 828 #define AMDGPU_PTE_EXECUTABLE (1 << 4) 829 830 #define AMDGPU_PTE_READABLE (1 << 5) 831 #define AMDGPU_PTE_WRITEABLE (1 << 6) 832 833 /* PTE (Page Table Entry) fragment field for different page sizes */ 834 #define AMDGPU_PTE_FRAG_4KB (0 << 7) 835 #define AMDGPU_PTE_FRAG_64KB (4 << 7) 836 #define AMDGPU_LOG2_PAGES_PER_FRAG 4 837 838 /* How to programm VM fault handling */ 839 #define AMDGPU_VM_FAULT_STOP_NEVER 0 840 #define AMDGPU_VM_FAULT_STOP_FIRST 1 841 #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 842 843 struct amdgpu_vm_pt { 844 struct amdgpu_bo_list_entry entry; 845 uint64_t addr; 846 }; 847 848 struct amdgpu_vm { 849 /* tree of virtual addresses mapped */ 850 struct rb_root va; 851 852 /* protecting invalidated */ 853 spinlock_t status_lock; 854 855 /* BOs moved, but not yet updated in the PT */ 856 struct list_head invalidated; 857 858 /* BOs cleared in the PT because of a move */ 859 struct list_head cleared; 860 861 /* BO mappings freed, but not yet updated in the PT */ 862 struct list_head freed; 863 864 /* contains the page directory */ 865 struct amdgpu_bo *page_directory; 866 unsigned max_pde_used; 867 struct fence *page_directory_fence; 868 869 /* array of page tables, one for each page directory entry */ 870 struct amdgpu_vm_pt *page_tables; 871 872 /* for id and flush management per ring */ 873 struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS]; 874 875 /* protecting freed */ 876 spinlock_t freed_lock; 877 878 /* Scheduler entity for page table updates */ 879 struct amd_sched_entity entity; 
880 881 /* client id */ 882 u64 client_id; 883 }; 884 885 struct amdgpu_vm_id { 886 struct list_head list; 887 struct fence *first; 888 struct amdgpu_sync active; 889 struct fence *last_flush; 890 struct amdgpu_ring *last_user; 891 atomic64_t owner; 892 893 uint64_t pd_gpu_addr; 894 /* last flushed PD/PT update */ 895 struct fence *flushed_updates; 896 897 uint32_t gds_base; 898 uint32_t gds_size; 899 uint32_t gws_base; 900 uint32_t gws_size; 901 uint32_t oa_base; 902 uint32_t oa_size; 903 }; 904 905 struct amdgpu_vm_manager { 906 /* Handling of VMIDs */ 907 struct mutex lock; 908 unsigned num_ids; 909 struct list_head ids_lru; 910 struct amdgpu_vm_id ids[AMDGPU_NUM_VM]; 911 912 uint32_t max_pfn; 913 /* vram base address for page table entry */ 914 u64 vram_base_offset; 915 /* is vm enabled? */ 916 bool enabled; 917 /* vm pte handling */ 918 const struct amdgpu_vm_pte_funcs *vm_pte_funcs; 919 struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS]; 920 unsigned vm_pte_num_rings; 921 atomic_t vm_pte_next_ring; 922 /* client id counter */ 923 atomic64_t client_counter; 924 }; 925 926 void amdgpu_vm_manager_init(struct amdgpu_device *adev); 927 void amdgpu_vm_manager_fini(struct amdgpu_device *adev); 928 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); 929 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); 930 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, 931 struct list_head *validated, 932 struct amdgpu_bo_list_entry *entry); 933 void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates); 934 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, 935 struct amdgpu_vm *vm); 936 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, 937 struct amdgpu_sync *sync, struct fence *fence, 938 unsigned *vm_id, uint64_t *vm_pd_addr); 939 int amdgpu_vm_flush(struct amdgpu_ring *ring, 940 unsigned vm_id, uint64_t pd_addr, 941 uint32_t gds_base, uint32_t gds_size, 942 uint32_t gws_base, uint32_t 
gws_size, 943 uint32_t oa_base, uint32_t oa_size); 944 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); 945 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr); 946 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, 947 struct amdgpu_vm *vm); 948 int amdgpu_vm_clear_freed(struct amdgpu_device *adev, 949 struct amdgpu_vm *vm); 950 int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm, 951 struct amdgpu_sync *sync); 952 int amdgpu_vm_bo_update(struct amdgpu_device *adev, 953 struct amdgpu_bo_va *bo_va, 954 struct ttm_mem_reg *mem); 955 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, 956 struct amdgpu_bo *bo); 957 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, 958 struct amdgpu_bo *bo); 959 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, 960 struct amdgpu_vm *vm, 961 struct amdgpu_bo *bo); 962 int amdgpu_vm_bo_map(struct amdgpu_device *adev, 963 struct amdgpu_bo_va *bo_va, 964 uint64_t addr, uint64_t offset, 965 uint64_t size, uint32_t flags); 966 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, 967 struct amdgpu_bo_va *bo_va, 968 uint64_t addr); 969 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, 970 struct amdgpu_bo_va *bo_va); 971 972 /* 973 * context related structures 974 */ 975 976 struct amdgpu_ctx_ring { 977 uint64_t sequence; 978 struct fence **fences; 979 struct amd_sched_entity entity; 980 }; 981 982 struct amdgpu_ctx { 983 struct kref refcount; 984 struct amdgpu_device *adev; 985 unsigned reset_counter; 986 spinlock_t ring_lock; 987 struct fence **fences; 988 struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS]; 989 }; 990 991 struct amdgpu_ctx_mgr { 992 struct amdgpu_device *adev; 993 struct mutex lock; 994 /* protected by lock */ 995 struct idr ctx_handles; 996 }; 997 998 struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); 999 int amdgpu_ctx_put(struct amdgpu_ctx *ctx); 1000 1001 uint64_t amdgpu_ctx_add_fence(struct 
amdgpu_ctx *ctx, struct amdgpu_ring *ring, 1002 struct fence *fence); 1003 struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, 1004 struct amdgpu_ring *ring, uint64_t seq); 1005 1006 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, 1007 struct drm_file *filp); 1008 1009 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr); 1010 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr); 1011 1012 /* 1013 * file private structure 1014 */ 1015 1016 struct amdgpu_fpriv { 1017 struct amdgpu_vm vm; 1018 struct mutex bo_list_lock; 1019 struct idr bo_list_handles; 1020 struct amdgpu_ctx_mgr ctx_mgr; 1021 }; 1022 1023 /* 1024 * residency list 1025 */ 1026 1027 struct amdgpu_bo_list { 1028 struct mutex lock; 1029 struct amdgpu_bo *gds_obj; 1030 struct amdgpu_bo *gws_obj; 1031 struct amdgpu_bo *oa_obj; 1032 unsigned first_userptr; 1033 unsigned num_entries; 1034 struct amdgpu_bo_list_entry *array; 1035 }; 1036 1037 struct amdgpu_bo_list * 1038 amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id); 1039 void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list, 1040 struct list_head *validated); 1041 void amdgpu_bo_list_put(struct amdgpu_bo_list *list); 1042 void amdgpu_bo_list_free(struct amdgpu_bo_list *list); 1043 1044 /* 1045 * GFX stuff 1046 */ 1047 #include "clearstate_defs.h" 1048 1049 struct amdgpu_rlc_funcs { 1050 void (*enter_safe_mode)(struct amdgpu_device *adev); 1051 void (*exit_safe_mode)(struct amdgpu_device *adev); 1052 }; 1053 1054 struct amdgpu_rlc { 1055 /* for power gating */ 1056 struct amdgpu_bo *save_restore_obj; 1057 uint64_t save_restore_gpu_addr; 1058 volatile uint32_t *sr_ptr; 1059 const u32 *reg_list; 1060 u32 reg_list_size; 1061 /* for clear state */ 1062 struct amdgpu_bo *clear_state_obj; 1063 uint64_t clear_state_gpu_addr; 1064 volatile uint32_t *cs_ptr; 1065 const struct cs_section_def *cs_data; 1066 u32 clear_state_size; 1067 /* for cp tables */ 1068 struct amdgpu_bo *cp_table_obj; 1069 uint64_t cp_table_gpu_addr; 1070 volatile 
uint32_t *cp_table_ptr; 1071 u32 cp_table_size; 1072 1073 /* safe mode for updating CG/PG state */ 1074 bool in_safe_mode; 1075 const struct amdgpu_rlc_funcs *funcs; 1076 1077 /* for firmware data */ 1078 u32 save_and_restore_offset; 1079 u32 clear_state_descriptor_offset; 1080 u32 avail_scratch_ram_locations; 1081 u32 reg_restore_list_size; 1082 u32 reg_list_format_start; 1083 u32 reg_list_format_separate_start; 1084 u32 starting_offsets_start; 1085 u32 reg_list_format_size_bytes; 1086 u32 reg_list_size_bytes; 1087 1088 u32 *register_list_format; 1089 u32 *register_restore; 1090 }; 1091 1092 struct amdgpu_mec { 1093 struct amdgpu_bo *hpd_eop_obj; 1094 u64 hpd_eop_gpu_addr; 1095 u32 num_pipe; 1096 u32 num_mec; 1097 u32 num_queue; 1098 }; 1099 1100 /* 1101 * GPU scratch registers structures, functions & helpers 1102 */ 1103 struct amdgpu_scratch { 1104 unsigned num_reg; 1105 uint32_t reg_base; 1106 bool free[32]; 1107 uint32_t reg[32]; 1108 }; 1109 1110 /* 1111 * GFX configurations 1112 */ 1113 struct amdgpu_gca_config { 1114 unsigned max_shader_engines; 1115 unsigned max_tile_pipes; 1116 unsigned max_cu_per_sh; 1117 unsigned max_sh_per_se; 1118 unsigned max_backends_per_se; 1119 unsigned max_texture_channel_caches; 1120 unsigned max_gprs; 1121 unsigned max_gs_threads; 1122 unsigned max_hw_contexts; 1123 unsigned sc_prim_fifo_size_frontend; 1124 unsigned sc_prim_fifo_size_backend; 1125 unsigned sc_hiz_tile_fifo_size; 1126 unsigned sc_earlyz_tile_fifo_size; 1127 1128 unsigned num_tile_pipes; 1129 unsigned backend_enable_mask; 1130 unsigned mem_max_burst_length_bytes; 1131 unsigned mem_row_size_in_kb; 1132 unsigned shader_engine_tile_size; 1133 unsigned num_gpus; 1134 unsigned multi_gpu_tile_size; 1135 unsigned mc_arb_ramcfg; 1136 unsigned gb_addr_config; 1137 unsigned num_rbs; 1138 1139 uint32_t tile_mode_array[32]; 1140 uint32_t macrotile_mode_array[16]; 1141 }; 1142 1143 struct amdgpu_cu_info { 1144 uint32_t number; /* total active CU number */ 1145 uint32_t 
ao_cu_mask; 1146 uint32_t bitmap[4][4]; 1147 }; 1148 1149 struct amdgpu_gfx { 1150 struct mutex gpu_clock_mutex; 1151 struct amdgpu_gca_config config; 1152 struct amdgpu_rlc rlc; 1153 struct amdgpu_mec mec; 1154 struct amdgpu_scratch scratch; 1155 const struct firmware *me_fw; /* ME firmware */ 1156 uint32_t me_fw_version; 1157 const struct firmware *pfp_fw; /* PFP firmware */ 1158 uint32_t pfp_fw_version; 1159 const struct firmware *ce_fw; /* CE firmware */ 1160 uint32_t ce_fw_version; 1161 const struct firmware *rlc_fw; /* RLC firmware */ 1162 uint32_t rlc_fw_version; 1163 const struct firmware *mec_fw; /* MEC firmware */ 1164 uint32_t mec_fw_version; 1165 const struct firmware *mec2_fw; /* MEC2 firmware */ 1166 uint32_t mec2_fw_version; 1167 uint32_t me_feature_version; 1168 uint32_t ce_feature_version; 1169 uint32_t pfp_feature_version; 1170 uint32_t rlc_feature_version; 1171 uint32_t mec_feature_version; 1172 uint32_t mec2_feature_version; 1173 struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS]; 1174 unsigned num_gfx_rings; 1175 struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS]; 1176 unsigned num_compute_rings; 1177 struct amdgpu_irq_src eop_irq; 1178 struct amdgpu_irq_src priv_reg_irq; 1179 struct amdgpu_irq_src priv_inst_irq; 1180 /* gfx status */ 1181 uint32_t gfx_current_status; 1182 /* ce ram size*/ 1183 unsigned ce_ram_size; 1184 struct amdgpu_cu_info cu_info; 1185 }; 1186 1187 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, 1188 unsigned size, struct amdgpu_ib *ib); 1189 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, 1190 struct fence *f); 1191 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, 1192 struct amdgpu_ib *ib, struct fence *last_vm_update, 1193 struct amdgpu_job *job, struct fence **f); 1194 int amdgpu_ib_pool_init(struct amdgpu_device *adev); 1195 void amdgpu_ib_pool_fini(struct amdgpu_device *adev); 1196 int amdgpu_ib_ring_tests(struct amdgpu_device *adev); 1197 int 
amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw); 1198 void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); 1199 void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); 1200 void amdgpu_ring_commit(struct amdgpu_ring *ring); 1201 void amdgpu_ring_undo(struct amdgpu_ring *ring); 1202 unsigned amdgpu_ring_backup(struct amdgpu_ring *ring, 1203 uint32_t **data); 1204 int amdgpu_ring_restore(struct amdgpu_ring *ring, 1205 unsigned size, uint32_t *data); 1206 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, 1207 unsigned ring_size, u32 nop, u32 align_mask, 1208 struct amdgpu_irq_src *irq_src, unsigned irq_type, 1209 enum amdgpu_ring_type ring_type); 1210 void amdgpu_ring_fini(struct amdgpu_ring *ring); 1211 1212 /* 1213 * CS. 1214 */ 1215 struct amdgpu_cs_chunk { 1216 uint32_t chunk_id; 1217 uint32_t length_dw; 1218 void *kdata; 1219 }; 1220 1221 struct amdgpu_cs_parser { 1222 struct amdgpu_device *adev; 1223 struct drm_file *filp; 1224 struct amdgpu_ctx *ctx; 1225 1226 /* chunks */ 1227 unsigned nchunks; 1228 struct amdgpu_cs_chunk *chunks; 1229 1230 /* scheduler job object */ 1231 struct amdgpu_job *job; 1232 1233 /* buffer objects */ 1234 struct ww_acquire_ctx ticket; 1235 struct amdgpu_bo_list *bo_list; 1236 struct amdgpu_bo_list_entry vm_pd; 1237 struct list_head validated; 1238 struct fence *fence; 1239 uint64_t bytes_moved_threshold; 1240 uint64_t bytes_moved; 1241 1242 /* user fence */ 1243 struct amdgpu_bo_list_entry uf_entry; 1244 }; 1245 1246 struct amdgpu_job { 1247 struct amd_sched_job base; 1248 struct amdgpu_device *adev; 1249 struct amdgpu_vm *vm; 1250 struct amdgpu_ring *ring; 1251 struct amdgpu_sync sync; 1252 struct amdgpu_ib *ibs; 1253 struct fence *fence; /* the hw fence */ 1254 uint32_t num_ibs; 1255 void *owner; 1256 uint64_t ctx; 1257 unsigned vm_id; 1258 uint64_t vm_pd_addr; 1259 uint32_t gds_base, gds_size; 1260 uint32_t gws_base, gws_size; 1261 uint32_t oa_base, 
oa_size;

	/* user fence handling */
	struct amdgpu_bo	*uf_bo;
	uint32_t		uf_offset;
	uint64_t		uf_sequence;

};
/* Recover the containing amdgpu_job from its embedded scheduler job. */
#define to_amdgpu_job(sched_job) \
		container_of((sched_job), struct amdgpu_job, base)

/*
 * Read one dword from IB number @ib_idx of the parser's scheduler job.
 * NOTE(review): neither ib_idx nor idx is bounds-checked here; callers
 * are trusted to stay within job->num_ibs and the IB's length.
 */
static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
				      uint32_t ib_idx, int idx)
{
	return p->job->ibs[ib_idx].ptr[idx];
}

/*
 * Patch one dword in IB number @ib_idx of the parser's scheduler job
 * (used by CS parsing to rewrite packets in place).  Same unchecked
 * indexing caveat as amdgpu_get_ib_value().
 */
static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
				       uint32_t ib_idx, int idx,
				       uint32_t value)
{
	p->job->ibs[ib_idx].ptr[idx] = value;
}

/*
 * Writeback
 */
#define AMDGPU_MAX_WB 1024	/* Reserve at most 1024 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
	struct amdgpu_bo	*wb_obj;	/* BO backing the writeback area */
	volatile uint32_t	*wb;		/* CPU mapping of the writeback area */
	uint64_t		gpu_addr;	/* GPU address of the writeback area */
	u32			num_wb;	/* Number of wb slots actually reserved for amdgpu. */
	/* bitmap of in-use slots, one bit per possible wb slot */
	unsigned long		used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};

int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);



/* Thermal controller variants inherited from the radeon lineage. */
enum amdgpu_int_thermal_type {
	THERMAL_TYPE_NONE,
	THERMAL_TYPE_EXTERNAL,
	THERMAL_TYPE_EXTERNAL_GPIO,
	THERMAL_TYPE_RV6XX,
	THERMAL_TYPE_RV770,
	THERMAL_TYPE_ADT7473_WITH_INTERNAL,
	THERMAL_TYPE_EVERGREEN,
	THERMAL_TYPE_SUMO,
	THERMAL_TYPE_NI,
	THERMAL_TYPE_SI,
	THERMAL_TYPE_EMC2103_WITH_INTERNAL,
	THERMAL_TYPE_CI,
	THERMAL_TYPE_KV,
};

enum amdgpu_dpm_auto_throttle_src {
	AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
	AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
};

enum amdgpu_dpm_event_src {
	AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
	AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
	AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
	AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
	/* NOTE(review): "DIGIAL" typo is in the enumerator name itself;
	 * renaming it would break existing users of this header. */
	AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
};

#define AMDGPU_MAX_VCE_LEVELS 6

enum amdgpu_vce_level {
1335 AMDGPU_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */ 1336 AMDGPU_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */ 1337 AMDGPU_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */ 1338 AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */ 1339 AMDGPU_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */ 1340 AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */ 1341 }; 1342 1343 struct amdgpu_ps { 1344 u32 caps; /* vbios flags */ 1345 u32 class; /* vbios flags */ 1346 u32 class2; /* vbios flags */ 1347 /* UVD clocks */ 1348 u32 vclk; 1349 u32 dclk; 1350 /* VCE clocks */ 1351 u32 evclk; 1352 u32 ecclk; 1353 bool vce_active; 1354 enum amdgpu_vce_level vce_level; 1355 /* asic priv */ 1356 void *ps_priv; 1357 }; 1358 1359 struct amdgpu_dpm_thermal { 1360 /* thermal interrupt work */ 1361 struct work_struct work; 1362 /* low temperature threshold */ 1363 int min_temp; 1364 /* high temperature threshold */ 1365 int max_temp; 1366 /* was last interrupt low to high or high to low */ 1367 bool high_to_low; 1368 /* interrupt source */ 1369 struct amdgpu_irq_src irq; 1370 }; 1371 1372 enum amdgpu_clk_action 1373 { 1374 AMDGPU_SCLK_UP = 1, 1375 AMDGPU_SCLK_DOWN 1376 }; 1377 1378 struct amdgpu_blacklist_clocks 1379 { 1380 u32 sclk; 1381 u32 mclk; 1382 enum amdgpu_clk_action action; 1383 }; 1384 1385 struct amdgpu_clock_and_voltage_limits { 1386 u32 sclk; 1387 u32 mclk; 1388 u16 vddc; 1389 u16 vddci; 1390 }; 1391 1392 struct amdgpu_clock_array { 1393 u32 count; 1394 u32 *values; 1395 }; 1396 1397 struct amdgpu_clock_voltage_dependency_entry { 1398 u32 clk; 1399 u16 v; 1400 }; 1401 1402 struct amdgpu_clock_voltage_dependency_table { 1403 u32 count; 1404 struct amdgpu_clock_voltage_dependency_entry *entries; 1405 }; 1406 1407 union amdgpu_cac_leakage_entry { 1408 struct { 1409 u16 vddc; 1410 u32 leakage; 1411 }; 1412 struct { 1413 u16 vddc1; 1414 u16 vddc2; 1415 u16 vddc3; 1416 }; 1417 }; 1418 1419 
struct amdgpu_cac_leakage_table { 1420 u32 count; 1421 union amdgpu_cac_leakage_entry *entries; 1422 }; 1423 1424 struct amdgpu_phase_shedding_limits_entry { 1425 u16 voltage; 1426 u32 sclk; 1427 u32 mclk; 1428 }; 1429 1430 struct amdgpu_phase_shedding_limits_table { 1431 u32 count; 1432 struct amdgpu_phase_shedding_limits_entry *entries; 1433 }; 1434 1435 struct amdgpu_uvd_clock_voltage_dependency_entry { 1436 u32 vclk; 1437 u32 dclk; 1438 u16 v; 1439 }; 1440 1441 struct amdgpu_uvd_clock_voltage_dependency_table { 1442 u8 count; 1443 struct amdgpu_uvd_clock_voltage_dependency_entry *entries; 1444 }; 1445 1446 struct amdgpu_vce_clock_voltage_dependency_entry { 1447 u32 ecclk; 1448 u32 evclk; 1449 u16 v; 1450 }; 1451 1452 struct amdgpu_vce_clock_voltage_dependency_table { 1453 u8 count; 1454 struct amdgpu_vce_clock_voltage_dependency_entry *entries; 1455 }; 1456 1457 struct amdgpu_ppm_table { 1458 u8 ppm_design; 1459 u16 cpu_core_number; 1460 u32 platform_tdp; 1461 u32 small_ac_platform_tdp; 1462 u32 platform_tdc; 1463 u32 small_ac_platform_tdc; 1464 u32 apu_tdp; 1465 u32 dgpu_tdp; 1466 u32 dgpu_ulv_power; 1467 u32 tj_max; 1468 }; 1469 1470 struct amdgpu_cac_tdp_table { 1471 u16 tdp; 1472 u16 configurable_tdp; 1473 u16 tdc; 1474 u16 battery_power_limit; 1475 u16 small_power_limit; 1476 u16 low_cac_leakage; 1477 u16 high_cac_leakage; 1478 u16 maximum_power_delivery_limit; 1479 }; 1480 1481 struct amdgpu_dpm_dynamic_state { 1482 struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk; 1483 struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk; 1484 struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk; 1485 struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk; 1486 struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk; 1487 struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table; 1488 struct amdgpu_vce_clock_voltage_dependency_table 
vce_clock_voltage_dependency_table; 1489 struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table; 1490 struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table; 1491 struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk; 1492 struct amdgpu_clock_array valid_sclk_values; 1493 struct amdgpu_clock_array valid_mclk_values; 1494 struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc; 1495 struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac; 1496 u32 mclk_sclk_ratio; 1497 u32 sclk_mclk_delta; 1498 u16 vddc_vddci_delta; 1499 u16 min_vddc_for_pcie_gen2; 1500 struct amdgpu_cac_leakage_table cac_leakage_table; 1501 struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table; 1502 struct amdgpu_ppm_table *ppm_table; 1503 struct amdgpu_cac_tdp_table *cac_tdp_table; 1504 }; 1505 1506 struct amdgpu_dpm_fan { 1507 u16 t_min; 1508 u16 t_med; 1509 u16 t_high; 1510 u16 pwm_min; 1511 u16 pwm_med; 1512 u16 pwm_high; 1513 u8 t_hyst; 1514 u32 cycle_delay; 1515 u16 t_max; 1516 u8 control_mode; 1517 u16 default_max_fan_pwm; 1518 u16 default_fan_output_sensitivity; 1519 u16 fan_output_sensitivity; 1520 bool ucode_fan_control; 1521 }; 1522 1523 enum amdgpu_pcie_gen { 1524 AMDGPU_PCIE_GEN1 = 0, 1525 AMDGPU_PCIE_GEN2 = 1, 1526 AMDGPU_PCIE_GEN3 = 2, 1527 AMDGPU_PCIE_GEN_INVALID = 0xffff 1528 }; 1529 1530 enum amdgpu_dpm_forced_level { 1531 AMDGPU_DPM_FORCED_LEVEL_AUTO = 0, 1532 AMDGPU_DPM_FORCED_LEVEL_LOW = 1, 1533 AMDGPU_DPM_FORCED_LEVEL_HIGH = 2, 1534 AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3, 1535 }; 1536 1537 struct amdgpu_vce_state { 1538 /* vce clocks */ 1539 u32 evclk; 1540 u32 ecclk; 1541 /* gpu clocks */ 1542 u32 sclk; 1543 u32 mclk; 1544 u8 clk_idx; 1545 u8 pstate; 1546 }; 1547 1548 struct amdgpu_dpm_funcs { 1549 int (*get_temperature)(struct amdgpu_device *adev); 1550 int (*pre_set_power_state)(struct amdgpu_device *adev); 1551 int (*set_power_state)(struct amdgpu_device *adev); 1552 void 
(*post_set_power_state)(struct amdgpu_device *adev); 1553 void (*display_configuration_changed)(struct amdgpu_device *adev); 1554 u32 (*get_sclk)(struct amdgpu_device *adev, bool low); 1555 u32 (*get_mclk)(struct amdgpu_device *adev, bool low); 1556 void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps); 1557 void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m); 1558 int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level); 1559 bool (*vblank_too_short)(struct amdgpu_device *adev); 1560 void (*powergate_uvd)(struct amdgpu_device *adev, bool gate); 1561 void (*powergate_vce)(struct amdgpu_device *adev, bool gate); 1562 void (*enable_bapm)(struct amdgpu_device *adev, bool enable); 1563 void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode); 1564 u32 (*get_fan_control_mode)(struct amdgpu_device *adev); 1565 int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed); 1566 int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed); 1567 int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask); 1568 int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf); 1569 int (*get_sclk_od)(struct amdgpu_device *adev); 1570 int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value); 1571 int (*get_mclk_od)(struct amdgpu_device *adev); 1572 int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value); 1573 }; 1574 1575 struct amdgpu_dpm { 1576 struct amdgpu_ps *ps; 1577 /* number of valid power states */ 1578 int num_ps; 1579 /* current power state that is active */ 1580 struct amdgpu_ps *current_ps; 1581 /* requested power state */ 1582 struct amdgpu_ps *requested_ps; 1583 /* boot up power state */ 1584 struct amdgpu_ps *boot_ps; 1585 /* default uvd power state */ 1586 struct amdgpu_ps *uvd_ps; 1587 /* vce requirements */ 1588 struct amdgpu_vce_state 
vce_states[AMDGPU_MAX_VCE_LEVELS]; 1589 enum amdgpu_vce_level vce_level; 1590 enum amd_pm_state_type state; 1591 enum amd_pm_state_type user_state; 1592 u32 platform_caps; 1593 u32 voltage_response_time; 1594 u32 backbias_response_time; 1595 void *priv; 1596 u32 new_active_crtcs; 1597 int new_active_crtc_count; 1598 u32 current_active_crtcs; 1599 int current_active_crtc_count; 1600 struct amdgpu_dpm_dynamic_state dyn_state; 1601 struct amdgpu_dpm_fan fan; 1602 u32 tdp_limit; 1603 u32 near_tdp_limit; 1604 u32 near_tdp_limit_adjusted; 1605 u32 sq_ramping_threshold; 1606 u32 cac_leakage; 1607 u16 tdp_od_limit; 1608 u32 tdp_adjustment; 1609 u16 load_line_slope; 1610 bool power_control; 1611 bool ac_power; 1612 /* special states active */ 1613 bool thermal_active; 1614 bool uvd_active; 1615 bool vce_active; 1616 /* thermal handling */ 1617 struct amdgpu_dpm_thermal thermal; 1618 /* forced levels */ 1619 enum amdgpu_dpm_forced_level forced_level; 1620 }; 1621 1622 struct amdgpu_pm { 1623 struct mutex mutex; 1624 u32 current_sclk; 1625 u32 current_mclk; 1626 u32 default_sclk; 1627 u32 default_mclk; 1628 struct amdgpu_i2c_chan *i2c_bus; 1629 /* internal thermal controller on rv6xx+ */ 1630 enum amdgpu_int_thermal_type int_thermal_type; 1631 struct device *int_hwmon_dev; 1632 /* fan control parameters */ 1633 bool no_fan; 1634 u8 fan_pulses_per_revolution; 1635 u8 fan_min_rpm; 1636 u8 fan_max_rpm; 1637 /* dpm */ 1638 bool dpm_enabled; 1639 bool sysfs_initialized; 1640 struct amdgpu_dpm dpm; 1641 const struct firmware *fw; /* SMC firmware */ 1642 uint32_t fw_version; 1643 const struct amdgpu_dpm_funcs *funcs; 1644 uint32_t pcie_gen_mask; 1645 uint32_t pcie_mlw_mask; 1646 struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */ 1647 }; 1648 1649 void amdgpu_get_pcie_info(struct amdgpu_device *adev); 1650 1651 /* 1652 * UVD 1653 */ 1654 #define AMDGPU_DEFAULT_UVD_HANDLES 10 1655 #define AMDGPU_MAX_UVD_HANDLES 40 1656 #define AMDGPU_UVD_STACK_SIZE (200*1024) 1657 
#define AMDGPU_UVD_HEAP_SIZE (256*1024) 1658 #define AMDGPU_UVD_SESSION_SIZE (50*1024) 1659 #define AMDGPU_UVD_FIRMWARE_OFFSET 256 1660 1661 struct amdgpu_uvd { 1662 struct amdgpu_bo *vcpu_bo; 1663 void *cpu_addr; 1664 uint64_t gpu_addr; 1665 unsigned fw_version; 1666 void *saved_bo; 1667 unsigned max_handles; 1668 atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; 1669 struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; 1670 struct delayed_work idle_work; 1671 const struct firmware *fw; /* UVD firmware */ 1672 struct amdgpu_ring ring; 1673 struct amdgpu_irq_src irq; 1674 bool address_64_bit; 1675 struct amd_sched_entity entity; 1676 }; 1677 1678 /* 1679 * VCE 1680 */ 1681 #define AMDGPU_MAX_VCE_HANDLES 16 1682 #define AMDGPU_VCE_FIRMWARE_OFFSET 256 1683 1684 #define AMDGPU_VCE_HARVEST_VCE0 (1 << 0) 1685 #define AMDGPU_VCE_HARVEST_VCE1 (1 << 1) 1686 1687 struct amdgpu_vce { 1688 struct amdgpu_bo *vcpu_bo; 1689 uint64_t gpu_addr; 1690 unsigned fw_version; 1691 unsigned fb_version; 1692 atomic_t handles[AMDGPU_MAX_VCE_HANDLES]; 1693 struct drm_file *filp[AMDGPU_MAX_VCE_HANDLES]; 1694 uint32_t img_size[AMDGPU_MAX_VCE_HANDLES]; 1695 struct delayed_work idle_work; 1696 const struct firmware *fw; /* VCE firmware */ 1697 struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS]; 1698 struct amdgpu_irq_src irq; 1699 unsigned harvest_config; 1700 struct amd_sched_entity entity; 1701 }; 1702 1703 /* 1704 * SDMA 1705 */ 1706 struct amdgpu_sdma_instance { 1707 /* SDMA firmware */ 1708 const struct firmware *fw; 1709 uint32_t fw_version; 1710 uint32_t feature_version; 1711 1712 struct amdgpu_ring ring; 1713 bool burst_nop; 1714 }; 1715 1716 struct amdgpu_sdma { 1717 struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES]; 1718 struct amdgpu_irq_src trap_irq; 1719 struct amdgpu_irq_src illegal_inst_irq; 1720 int num_instances; 1721 }; 1722 1723 /* 1724 * Firmware 1725 */ 1726 struct amdgpu_firmware { 1727 struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM]; 1728 bool smu_load; 1729 struct 
amdgpu_bo *fw_buf; 1730 unsigned int fw_size; 1731 }; 1732 1733 /* 1734 * Benchmarking 1735 */ 1736 void amdgpu_benchmark(struct amdgpu_device *adev, int test_number); 1737 1738 1739 /* 1740 * Testing 1741 */ 1742 void amdgpu_test_moves(struct amdgpu_device *adev); 1743 void amdgpu_test_ring_sync(struct amdgpu_device *adev, 1744 struct amdgpu_ring *cpA, 1745 struct amdgpu_ring *cpB); 1746 void amdgpu_test_syncing(struct amdgpu_device *adev); 1747 1748 /* 1749 * MMU Notifier 1750 */ 1751 #if defined(CONFIG_MMU_NOTIFIER) 1752 int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr); 1753 void amdgpu_mn_unregister(struct amdgpu_bo *bo); 1754 #else 1755 static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) 1756 { 1757 return -ENODEV; 1758 } 1759 static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {} 1760 #endif 1761 1762 /* 1763 * Debugfs 1764 */ 1765 struct amdgpu_debugfs { 1766 const struct drm_info_list *files; 1767 unsigned num_files; 1768 }; 1769 1770 int amdgpu_debugfs_add_files(struct amdgpu_device *adev, 1771 const struct drm_info_list *files, 1772 unsigned nfiles); 1773 int amdgpu_debugfs_fence_init(struct amdgpu_device *adev); 1774 1775 #if defined(CONFIG_DEBUG_FS) 1776 int amdgpu_debugfs_init(struct drm_minor *minor); 1777 void amdgpu_debugfs_cleanup(struct drm_minor *minor); 1778 #endif 1779 1780 /* 1781 * amdgpu smumgr functions 1782 */ 1783 struct amdgpu_smumgr_funcs { 1784 int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype); 1785 int (*request_smu_load_fw)(struct amdgpu_device *adev); 1786 int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype); 1787 }; 1788 1789 /* 1790 * amdgpu smumgr 1791 */ 1792 struct amdgpu_smumgr { 1793 struct amdgpu_bo *toc_buf; 1794 struct amdgpu_bo *smu_buf; 1795 /* asic priv smu data */ 1796 void *priv; 1797 spinlock_t smu_lock; 1798 /* smumgr functions */ 1799 const struct amdgpu_smumgr_funcs *smumgr_funcs; 1800 /* ucode loading complete flag 
*/ 1801 uint32_t fw_flags; 1802 }; 1803 1804 /* 1805 * ASIC specific register table accessible by UMD 1806 */ 1807 struct amdgpu_allowed_register_entry { 1808 uint32_t reg_offset; 1809 bool untouched; 1810 bool grbm_indexed; 1811 }; 1812 1813 /* 1814 * ASIC specific functions. 1815 */ 1816 struct amdgpu_asic_funcs { 1817 bool (*read_disabled_bios)(struct amdgpu_device *adev); 1818 bool (*read_bios_from_rom)(struct amdgpu_device *adev, 1819 u8 *bios, u32 length_bytes); 1820 int (*read_register)(struct amdgpu_device *adev, u32 se_num, 1821 u32 sh_num, u32 reg_offset, u32 *value); 1822 void (*set_vga_state)(struct amdgpu_device *adev, bool state); 1823 int (*reset)(struct amdgpu_device *adev); 1824 /* wait for mc_idle */ 1825 int (*wait_for_mc_idle)(struct amdgpu_device *adev); 1826 /* get the reference clock */ 1827 u32 (*get_xclk)(struct amdgpu_device *adev); 1828 /* get the gpu clock counter */ 1829 uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); 1830 /* MM block clocks */ 1831 int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk); 1832 int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk); 1833 /* query virtual capabilities */ 1834 u32 (*get_virtual_caps)(struct amdgpu_device *adev); 1835 }; 1836 1837 /* 1838 * IOCTL. 
1839 */ 1840 int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, 1841 struct drm_file *filp); 1842 int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, 1843 struct drm_file *filp); 1844 1845 int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data, 1846 struct drm_file *filp); 1847 int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, 1848 struct drm_file *filp); 1849 int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data, 1850 struct drm_file *filp); 1851 int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, 1852 struct drm_file *filp); 1853 int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, 1854 struct drm_file *filp); 1855 int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, 1856 struct drm_file *filp); 1857 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); 1858 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); 1859 1860 int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, 1861 struct drm_file *filp); 1862 1863 /* VRAM scratch page for HDP bug, default vram page */ 1864 struct amdgpu_vram_scratch { 1865 struct amdgpu_bo *robj; 1866 volatile uint32_t *ptr; 1867 u64 gpu_addr; 1868 }; 1869 1870 /* 1871 * ACPI 1872 */ 1873 struct amdgpu_atif_notification_cfg { 1874 bool enabled; 1875 int command_code; 1876 }; 1877 1878 struct amdgpu_atif_notifications { 1879 bool display_switch; 1880 bool expansion_mode_change; 1881 bool thermal_state; 1882 bool forced_power_state; 1883 bool system_power_state; 1884 bool display_conf_change; 1885 bool px_gfx_switch; 1886 bool brightness_change; 1887 bool dgpu_display_event; 1888 }; 1889 1890 struct amdgpu_atif_functions { 1891 bool system_params; 1892 bool sbios_requests; 1893 bool select_active_disp; 1894 bool lid_state; 1895 bool get_tv_standard; 1896 bool set_tv_standard; 1897 bool get_panel_expansion_mode; 1898 bool set_panel_expansion_mode; 1899 bool temperature_change; 1900 bool 
graphics_device_types; 1901 }; 1902 1903 struct amdgpu_atif { 1904 struct amdgpu_atif_notifications notifications; 1905 struct amdgpu_atif_functions functions; 1906 struct amdgpu_atif_notification_cfg notification_cfg; 1907 struct amdgpu_encoder *encoder_for_bl; 1908 }; 1909 1910 struct amdgpu_atcs_functions { 1911 bool get_ext_state; 1912 bool pcie_perf_req; 1913 bool pcie_dev_rdy; 1914 bool pcie_bus_width; 1915 }; 1916 1917 struct amdgpu_atcs { 1918 struct amdgpu_atcs_functions functions; 1919 }; 1920 1921 /* 1922 * CGS 1923 */ 1924 struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev); 1925 void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device); 1926 1927 1928 /* GPU virtualization */ 1929 #define AMDGPU_VIRT_CAPS_SRIOV_EN (1 << 0) 1930 #define AMDGPU_VIRT_CAPS_IS_VF (1 << 1) 1931 struct amdgpu_virtualization { 1932 bool supports_sr_iov; 1933 bool is_virtual; 1934 u32 caps; 1935 }; 1936 1937 /* 1938 * Core structure, functions and helpers. 1939 */ 1940 typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t); 1941 typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t); 1942 1943 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t); 1944 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t); 1945 1946 struct amdgpu_ip_block_status { 1947 bool valid; 1948 bool sw; 1949 bool hw; 1950 }; 1951 1952 struct amdgpu_device { 1953 struct device *dev; 1954 struct drm_device *ddev; 1955 struct pci_dev *pdev; 1956 1957 #ifdef CONFIG_DRM_AMD_ACP 1958 struct amdgpu_acp acp; 1959 #endif 1960 1961 /* ASIC */ 1962 enum amd_asic_type asic_type; 1963 uint32_t family; 1964 uint32_t rev_id; 1965 uint32_t external_rev_id; 1966 unsigned long flags; 1967 int usec_timeout; 1968 const struct amdgpu_asic_funcs *asic_funcs; 1969 bool shutdown; 1970 bool need_dma32; 1971 bool accel_working; 1972 struct work_struct reset_work; 1973 struct notifier_block acpi_nb; 1974 struct 
amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS]; 1975 struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS]; 1976 unsigned debugfs_count; 1977 #if defined(CONFIG_DEBUG_FS) 1978 struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS]; 1979 #endif 1980 struct amdgpu_atif atif; 1981 struct amdgpu_atcs atcs; 1982 struct mutex srbm_mutex; 1983 /* GRBM index mutex. Protects concurrent access to GRBM index */ 1984 struct mutex grbm_idx_mutex; 1985 struct dev_pm_domain vga_pm_domain; 1986 bool have_disp_power_ref; 1987 1988 /* BIOS */ 1989 uint8_t *bios; 1990 bool is_atom_bios; 1991 struct amdgpu_bo *stollen_vga_memory; 1992 uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH]; 1993 1994 /* Register/doorbell mmio */ 1995 resource_size_t rmmio_base; 1996 resource_size_t rmmio_size; 1997 void __iomem *rmmio; 1998 /* protects concurrent MM_INDEX/DATA based register access */ 1999 spinlock_t mmio_idx_lock; 2000 /* protects concurrent SMC based register access */ 2001 spinlock_t smc_idx_lock; 2002 amdgpu_rreg_t smc_rreg; 2003 amdgpu_wreg_t smc_wreg; 2004 /* protects concurrent PCIE register access */ 2005 spinlock_t pcie_idx_lock; 2006 amdgpu_rreg_t pcie_rreg; 2007 amdgpu_wreg_t pcie_wreg; 2008 /* protects concurrent UVD register access */ 2009 spinlock_t uvd_ctx_idx_lock; 2010 amdgpu_rreg_t uvd_ctx_rreg; 2011 amdgpu_wreg_t uvd_ctx_wreg; 2012 /* protects concurrent DIDT register access */ 2013 spinlock_t didt_idx_lock; 2014 amdgpu_rreg_t didt_rreg; 2015 amdgpu_wreg_t didt_wreg; 2016 /* protects concurrent ENDPOINT (audio) register access */ 2017 spinlock_t audio_endpt_idx_lock; 2018 amdgpu_block_rreg_t audio_endpt_rreg; 2019 amdgpu_block_wreg_t audio_endpt_wreg; 2020 void __iomem *rio_mem; 2021 resource_size_t rio_mem_size; 2022 struct amdgpu_doorbell doorbell; 2023 2024 /* clock/pll info */ 2025 struct amdgpu_clock clock; 2026 2027 /* MC */ 2028 struct amdgpu_mc mc; 2029 struct amdgpu_gart gart; 2030 struct amdgpu_dummy_page dummy_page; 2031 struct amdgpu_vm_manager 
vm_manager; 2032 2033 /* memory management */ 2034 struct amdgpu_mman mman; 2035 struct amdgpu_vram_scratch vram_scratch; 2036 struct amdgpu_wb wb; 2037 atomic64_t vram_usage; 2038 atomic64_t vram_vis_usage; 2039 atomic64_t gtt_usage; 2040 atomic64_t num_bytes_moved; 2041 atomic_t gpu_reset_counter; 2042 2043 /* display */ 2044 struct amdgpu_mode_info mode_info; 2045 struct work_struct hotplug_work; 2046 struct amdgpu_irq_src crtc_irq; 2047 struct amdgpu_irq_src pageflip_irq; 2048 struct amdgpu_irq_src hpd_irq; 2049 2050 /* rings */ 2051 u64 fence_context; 2052 unsigned num_rings; 2053 struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; 2054 bool ib_pool_ready; 2055 struct amdgpu_sa_manager ring_tmp_bo; 2056 2057 /* interrupts */ 2058 struct amdgpu_irq irq; 2059 2060 /* powerplay */ 2061 struct amd_powerplay powerplay; 2062 bool pp_enabled; 2063 bool pp_force_state_enabled; 2064 2065 /* dpm */ 2066 struct amdgpu_pm pm; 2067 u32 cg_flags; 2068 u32 pg_flags; 2069 2070 /* amdgpu smumgr */ 2071 struct amdgpu_smumgr smu; 2072 2073 /* gfx */ 2074 struct amdgpu_gfx gfx; 2075 2076 /* sdma */ 2077 struct amdgpu_sdma sdma; 2078 2079 /* uvd */ 2080 struct amdgpu_uvd uvd; 2081 2082 /* vce */ 2083 struct amdgpu_vce vce; 2084 2085 /* firmwares */ 2086 struct amdgpu_firmware firmware; 2087 2088 /* GDS */ 2089 struct amdgpu_gds gds; 2090 2091 const struct amdgpu_ip_block_version *ip_blocks; 2092 int num_ip_blocks; 2093 struct amdgpu_ip_block_status *ip_block_status; 2094 struct mutex mn_lock; 2095 DECLARE_HASHTABLE(mn_hash, 7); 2096 2097 /* tracking pinned memory */ 2098 u64 vram_pin_size; 2099 u64 invisible_pin_size; 2100 u64 gart_pin_size; 2101 2102 /* amdkfd interface */ 2103 struct kfd_dev *kfd; 2104 2105 struct amdgpu_virtualization virtualization; 2106 }; 2107 2108 bool amdgpu_device_is_px(struct drm_device *dev); 2109 int amdgpu_device_init(struct amdgpu_device *adev, 2110 struct drm_device *ddev, 2111 struct pci_dev *pdev, 2112 uint32_t flags); 2113 void amdgpu_device_fini(struct 
amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);

/* @always_indirect forces MM_INDEX/MM_DATA indirect access even for
 * registers that fall inside the directly mapped mmio aperture. */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			bool always_indirect);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    bool always_indirect);
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);

u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);

/*
 * Registers read & write functions.
 *
 * All of these macros expect a local (or parameter) named `adev`
 * pointing at the struct amdgpu_device to be in scope at the call site.
 */
#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), false)
#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), true)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), false))
#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), false)
#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), true)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
/* NOTE(review): REG_GET is byte-identical to REG_SET (shift-then-mask,
 * i.e. a setter).  A true field extractor would be
 * (((v) & FIELD##_MASK) >> FIELD##_SHIFT) — see REG_GET_FIELD below.
 * Left unchanged here because callers may depend on the current
 * expansion; audit users before correcting. */
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
/* Read-modify-write: bits set in @mask are preserved, the remaining
 * bits are replaced with the corresponding bits of @val. */
#define WREG32_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32(reg);			\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32(reg, tmp_);				\
	} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
/* Same read-modify-write pattern via the PLL accessors (RREG32_PLL /
 * WREG32_PLL must be provided by the including translation unit). */
#define WREG32_PLL_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32_PLL(reg);		\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32_PLL(reg, tmp_);				\
	} while (0)
#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))

#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))

/* Token-pasting helpers mapping (register, field) onto the generated
 * reg##__##field##__SHIFT / _MASK constants from the register headers. */
#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK

/* Return @orig_val with the named field replaced by @field_val. */
#define REG_SET_FIELD(orig_val, reg, field, field_val)			\
	(((orig_val) & ~REG_FIELD_MASK(reg, field)) |			\
	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))

/* Extract the named field from @value (mask, then shift down). */
#define REG_GET_FIELD(value, reg, field)				\
	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))

/*
 * BIOS helpers.
 *
 * Little-endian reads from the cached VBIOS image; `adev` must be in
 * scope at the call site.
 */
#define RBIOS8(i) (adev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))

/*
 * RING helpers.
2188 */ 2189 static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v) 2190 { 2191 if (ring->count_dw <= 0) 2192 DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); 2193 ring->ring[ring->wptr++] = v; 2194 ring->wptr &= ring->ptr_mask; 2195 ring->count_dw--; 2196 } 2197 2198 static inline struct amdgpu_sdma_instance * 2199 amdgpu_get_sdma_instance(struct amdgpu_ring *ring) 2200 { 2201 struct amdgpu_device *adev = ring->adev; 2202 int i; 2203 2204 for (i = 0; i < adev->sdma.num_instances; i++) 2205 if (&adev->sdma.instance[i].ring == ring) 2206 break; 2207 2208 if (i < AMDGPU_MAX_SDMA_INSTANCES) 2209 return &adev->sdma.instance[i]; 2210 else 2211 return NULL; 2212 } 2213 2214 /* 2215 * ASICs macro. 2216 */ 2217 #define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state)) 2218 #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev)) 2219 #define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev)) 2220 #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) 2221 #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) 2222 #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) 2223 #define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev))) 2224 #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) 2225 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) 2226 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) 2227 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v))) 2228 #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid)) 2229 #define amdgpu_gart_set_pte_pde(adev, 
pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)) 2230 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) 2231 #define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags))) 2232 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) 2233 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) 2234 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r)) 2235 #define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r)) 2236 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) 2237 #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) 2238 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) 2239 #define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c)) 2240 #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r)) 2241 #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr)) 2242 #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags)) 2243 #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as)) 2244 #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r)) 2245 #define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r)) 2246 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib))) 2247 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r)) 2248 #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o)) 2249 #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) 2250 #define amdgpu_ih_decode_iv(adev, 
iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) 2251 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) 2252 #define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r)) 2253 #define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc)) 2254 #define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc)) 2255 #define amdgpu_display_is_display_hung(adev) (adev)->mode_info.funcs->is_display_hung((adev)) 2256 #define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l)) 2257 #define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e)) 2258 #define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h)) 2259 #define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h)) 2260 #define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev)) 2261 #define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev)) 2262 #define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async)) 2263 #define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos)) 2264 #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c)) 2265 #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r)) 2266 #define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s)) 2267 #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s)) 2268 #define 
amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b)) 2269 #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) 2270 #define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev)) 2271 #define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev)) 2272 #define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev)) 2273 #define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev)) 2274 #define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps)) 2275 #define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev)) 2276 #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e)) 2277 2278 #define amdgpu_dpm_get_temperature(adev) \ 2279 ((adev)->pp_enabled ? \ 2280 (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \ 2281 (adev)->pm.funcs->get_temperature((adev))) 2282 2283 #define amdgpu_dpm_set_fan_control_mode(adev, m) \ 2284 ((adev)->pp_enabled ? \ 2285 (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \ 2286 (adev)->pm.funcs->set_fan_control_mode((adev), (m))) 2287 2288 #define amdgpu_dpm_get_fan_control_mode(adev) \ 2289 ((adev)->pp_enabled ? \ 2290 (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \ 2291 (adev)->pm.funcs->get_fan_control_mode((adev))) 2292 2293 #define amdgpu_dpm_set_fan_speed_percent(adev, s) \ 2294 ((adev)->pp_enabled ? \ 2295 (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ 2296 (adev)->pm.funcs->set_fan_speed_percent((adev), (s))) 2297 2298 #define amdgpu_dpm_get_fan_speed_percent(adev, s) \ 2299 ((adev)->pp_enabled ? 
\ 2300 (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ 2301 (adev)->pm.funcs->get_fan_speed_percent((adev), (s))) 2302 2303 #define amdgpu_dpm_get_sclk(adev, l) \ 2304 ((adev)->pp_enabled ? \ 2305 (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \ 2306 (adev)->pm.funcs->get_sclk((adev), (l))) 2307 2308 #define amdgpu_dpm_get_mclk(adev, l) \ 2309 ((adev)->pp_enabled ? \ 2310 (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \ 2311 (adev)->pm.funcs->get_mclk((adev), (l))) 2312 2313 2314 #define amdgpu_dpm_force_performance_level(adev, l) \ 2315 ((adev)->pp_enabled ? \ 2316 (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \ 2317 (adev)->pm.funcs->force_performance_level((adev), (l))) 2318 2319 #define amdgpu_dpm_powergate_uvd(adev, g) \ 2320 ((adev)->pp_enabled ? \ 2321 (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \ 2322 (adev)->pm.funcs->powergate_uvd((adev), (g))) 2323 2324 #define amdgpu_dpm_powergate_vce(adev, g) \ 2325 ((adev)->pp_enabled ? \ 2326 (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \ 2327 (adev)->pm.funcs->powergate_vce((adev), (g))) 2328 2329 #define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \ 2330 ((adev)->pp_enabled ? 
\ 2331 (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \ 2332 (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m))) 2333 2334 #define amdgpu_dpm_get_current_power_state(adev) \ 2335 (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle) 2336 2337 #define amdgpu_dpm_get_performance_level(adev) \ 2338 (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle) 2339 2340 #define amdgpu_dpm_get_pp_num_states(adev, data) \ 2341 (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data) 2342 2343 #define amdgpu_dpm_get_pp_table(adev, table) \ 2344 (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table) 2345 2346 #define amdgpu_dpm_set_pp_table(adev, buf, size) \ 2347 (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size) 2348 2349 #define amdgpu_dpm_print_clock_levels(adev, type, buf) \ 2350 (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf) 2351 2352 #define amdgpu_dpm_force_clock_level(adev, type, level) \ 2353 (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level) 2354 2355 #define amdgpu_dpm_get_sclk_od(adev) \ 2356 (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle) 2357 2358 #define amdgpu_dpm_set_sclk_od(adev, value) \ 2359 (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value) 2360 2361 #define amdgpu_dpm_get_mclk_od(adev) \ 2362 ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle)) 2363 2364 #define amdgpu_dpm_set_mclk_od(adev, value) \ 2365 ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value)) 2366 2367 #define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \ 2368 (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output)) 2369 2370 #define amdgpu_gds_switch(adev, r, v, d, w, a) 
(adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a)) 2371 2372 /* Common functions */ 2373 int amdgpu_gpu_reset(struct amdgpu_device *adev); 2374 void amdgpu_pci_config_reset(struct amdgpu_device *adev); 2375 bool amdgpu_card_posted(struct amdgpu_device *adev); 2376 void amdgpu_update_display_priority(struct amdgpu_device *adev); 2377 2378 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); 2379 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, 2380 u32 ip_instance, u32 ring, 2381 struct amdgpu_ring **out_ring); 2382 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain); 2383 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); 2384 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages); 2385 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, 2386 uint32_t flags); 2387 bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm); 2388 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm); 2389 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, 2390 unsigned long end); 2391 bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm, 2392 int *last_invalidated); 2393 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); 2394 uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, 2395 struct ttm_mem_reg *mem); 2396 void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base); 2397 void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc); 2398 void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size); 2399 void amdgpu_program_register_sequence(struct amdgpu_device *adev, 2400 const u32 *registers, 2401 const u32 array_size); 2402 2403 bool amdgpu_device_is_px(struct drm_device *dev); 2404 /* atpx handler */ 2405 #if defined(CONFIG_VGA_SWITCHEROO) 2406 void amdgpu_register_atpx_handler(void); 2407 void amdgpu_unregister_atpx_handler(void); 2408 bool 
amdgpu_has_atpx_dgpu_power_cntl(void); 2409 bool amdgpu_is_atpx_hybrid(void); 2410 #else 2411 static inline void amdgpu_register_atpx_handler(void) {} 2412 static inline void amdgpu_unregister_atpx_handler(void) {} 2413 static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; } 2414 static inline bool amdgpu_is_atpx_hybrid(void) { return false; } 2415 #endif 2416 2417 /* 2418 * KMS 2419 */ 2420 extern const struct drm_ioctl_desc amdgpu_ioctls_kms[]; 2421 extern const int amdgpu_max_kms_ioctl; 2422 2423 int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags); 2424 int amdgpu_driver_unload_kms(struct drm_device *dev); 2425 void amdgpu_driver_lastclose_kms(struct drm_device *dev); 2426 int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv); 2427 void amdgpu_driver_postclose_kms(struct drm_device *dev, 2428 struct drm_file *file_priv); 2429 void amdgpu_driver_preclose_kms(struct drm_device *dev, 2430 struct drm_file *file_priv); 2431 int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon); 2432 int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon); 2433 u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe); 2434 int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe); 2435 void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe); 2436 int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe, 2437 int *max_error, 2438 struct timeval *vblank_time, 2439 unsigned flags); 2440 long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, 2441 unsigned long arg); 2442 2443 /* 2444 * functions used by amdgpu_encoder.c 2445 */ 2446 struct amdgpu_afmt_acr { 2447 u32 clock; 2448 2449 int n_32khz; 2450 int cts_32khz; 2451 2452 int n_44_1khz; 2453 int cts_44_1khz; 2454 2455 int n_48khz; 2456 int cts_48khz; 2457 2458 }; 2459 2460 struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock); 2461 2462 /* amdgpu_acpi.c */ 
#if defined(CONFIG_ACPI)
/* ACPI integration, implemented in amdgpu_acpi.c. */
int amdgpu_acpi_init(struct amdgpu_device *adev);
void amdgpu_acpi_fini(struct amdgpu_device *adev);
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
					 u8 perf_req, bool advertise);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
#else
/* No-op stubs when ACPI support is compiled out.  NOTE(review): the
 * pcie_* helpers intentionally have no stubs here — presumably they are
 * only called from code that is itself gated on CONFIG_ACPI; verify at
 * call sites before adding new users. */
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
#endif

/* Find the buffer-object VA mapping covering @addr within a command
 * submission; on success *bo receives the backing BO. */
struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
		       uint64_t addr, struct amdgpu_bo **bo);

#include "amdgpu_object.h"
#endif