| /linux-6.15/drivers/gpu/drm/etnaviv/ |
| H A D | etnaviv_gpu.c |
    429   gpu->identity.model, gpu->identity.revision);  in etnaviv_hw_identify()
    511   gpu->base_rate_core >> gpu->freq_scale);  in etnaviv_gpu_update_clock()
    513   gpu->base_rate_shader >> gpu->freq_scale);  in etnaviv_gpu_update_clock()
    528   gpu->fe_waitcycles = clamp(gpu->base_rate_core >> (15 - gpu->freq_scale),  in etnaviv_gpu_update_clock()
    1178  f->gpu = gpu;  in etnaviv_gpu_fence_alloc()
    1181  gpu->fence_context, ++gpu->next_fence);  in etnaviv_gpu_fence_alloc()
    1395  struct etnaviv_gpu *gpu = submit->gpu;  in etnaviv_gpu_submit() local
    1468  event_free(gpu, gpu->sync_point_event);  in sync_point_worker()
    1476  struct etnaviv_gpu *gpu = submit->gpu;  in etnaviv_gpu_recover_hang() local
    1600  queue_work(gpu->wq, &gpu->sync_point_work);  in irq_handler()
    [all …]
|
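The etnaviv_gpu_update_clock() hits above scale the measured base clock rates by a power of two (`>> freq_scale`) and derive a bounded front-end wait-cycle budget with clamp(). A minimal sketch of that arithmetic only; the struct below and the clamp bounds are assumptions, not taken from the driver:

#include <linux/minmax.h>
#include <linux/types.h>

/* Hypothetical mirror of the fields named in the hits above. */
struct example_gpu {
	unsigned long base_rate_core;   /* measured core clock, Hz */
	unsigned long base_rate_shader; /* measured shader clock, Hz */
	unsigned int freq_scale;        /* 0 = full speed; each step halves the clock */
	unsigned long fe_waitcycles;
};

static unsigned long example_update_clock(struct example_gpu *gpu)
{
	/* Each increment of freq_scale divides the requested clock by two;
	 * the shader clock (base_rate_shader) is scaled the same way. */
	unsigned long core = gpu->base_rate_core >> gpu->freq_scale;

	/* The front-end wait budget follows the core clock but is kept in a
	 * bounded range, mirroring the clamp() at etnaviv_gpu.c:528; the
	 * bounds used here are placeholders, not the driver's values. */
	gpu->fe_waitcycles = clamp(gpu->base_rate_core >> (15 - gpu->freq_scale),
				   200UL, 0xffffUL);

	return core;	/* a real driver would hand this to clk_set_rate() */
}
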
| H A D | etnaviv_sched.c |
    38   struct etnaviv_gpu *gpu = submit->gpu;  in etnaviv_sched_timedout_job() local
    58   mutex_lock(&gpu->lock);  in etnaviv_sched_timedout_job()
    63   mutex_unlock(&gpu->lock);  in etnaviv_sched_timedout_job()
    66   (gpu->completed_fence != gpu->hangcheck_fence ||  in etnaviv_sched_timedout_job()
    71   gpu->hangcheck_primid = primid;  in etnaviv_sched_timedout_job()
    72   gpu->hangcheck_fence = gpu->completed_fence;  in etnaviv_sched_timedout_job()
    113  struct etnaviv_gpu *gpu = submit->gpu;  in etnaviv_sched_push_job() local
    121  mutex_lock(&gpu->sched_lock);  in etnaviv_sched_push_job()
    153  .name = dev_name(gpu->dev),  in etnaviv_sched_init()
    154  .dev = gpu->dev,  in etnaviv_sched_init()
    [all …]
|
| H A D | etnaviv_buffer.c |
    95   lockdep_assert_held(&gpu->lock);  in etnaviv_cmd_select_pipe()
    103  if (gpu->exec_state == ETNA_PIPE_2D)  in etnaviv_cmd_select_pipe()
    167  lockdep_assert_held(&gpu->lock);  in etnaviv_buffer_init()
    184  lockdep_assert_held(&gpu->lock);  in etnaviv_buffer_config_mmuv2()
    219  lockdep_assert_held(&gpu->lock);  in etnaviv_buffer_config_pta()
    241  lockdep_assert_held(&gpu->lock);  in etnaviv_buffer_end()
    243  if (gpu->exec_state == ETNA_PIPE_2D)  in etnaviv_buffer_end()
    307  lockdep_assert_held(&gpu->lock);  in etnaviv_sync_point_queue()
    355  lockdep_assert_held(&gpu->lock);  in etnaviv_buffer_queue()
    438  gpu->flush_seq = new_flush_seq;  in etnaviv_buffer_queue()
    [all …]
|
| H A D | etnaviv_gpu.h |
    172  writel(data, gpu->mmio + reg);  in gpu_write()
    182  readl(gpu->mmio + reg);  in gpu_read()
    184  return readl(gpu->mmio + reg);  in gpu_read()
    190  if (gpu->identity.model == chipModel_GC300 &&  in gpu_fix_power_address()
    191  gpu->identity.revision < 0x2000)  in gpu_fix_power_address()
    199  writel(data, gpu->mmio + gpu_fix_power_address(gpu, reg));  in gpu_write_power()
    204  return readl(gpu->mmio + gpu_fix_power_address(gpu, reg));  in gpu_read_power()
    209  int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
    217  void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
    224  int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
    [all …]
|
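The etnaviv_gpu.h hits show the driver's MMIO accessor pattern: plain writel()/readl() against gpu->mmio, plus a gpu_fix_power_address() quirk that redirects power-register accesses on early GC300 revisions. A hedged sketch of that pattern follows; the struct layout, the EXAMPLE_CHIP_GC300 value, and the 0x100 adjustment are assumptions for illustration, not taken from the header:

#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_CHIP_GC300	0x300	/* placeholder for chipModel_GC300 */

/* Hypothetical mirror of the fields used by the accessors in the hits. */
struct example_gpu {
	void __iomem *mmio;
	struct {
		u32 model;
		u32 revision;
	} identity;
};

static inline void example_gpu_write(struct example_gpu *gpu, u32 reg, u32 data)
{
	writel(data, gpu->mmio + reg);
}

static inline u32 example_gpu_read(struct example_gpu *gpu, u32 reg)
{
	return readl(gpu->mmio + reg);
}

/*
 * Early GC300 parts address their power registers differently; the header
 * fragments only show the model/revision check, so the adjustment below is a
 * stand-in, not the real offset.
 */
static inline u32 example_fix_power_address(struct example_gpu *gpu, u32 reg)
{
	if (gpu->identity.model == EXAMPLE_CHIP_GC300 &&
	    gpu->identity.revision < 0x2000)
		reg += 0x100;	/* placeholder adjustment */

	return reg;
}

static inline void example_gpu_write_power(struct example_gpu *gpu, u32 reg, u32 data)
{
	writel(data, gpu->mmio + example_fix_power_address(gpu, reg));
}
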
| H A D | etnaviv_perfmon.c |
    18   u32 (*sample)(struct etnaviv_gpu *gpu,
    65   lockdep_assert_held(&gpu->lock);  in pipe_perf_reg_read()
    68   pipe_select(gpu, clock, i);  in pipe_perf_reg_read()
    73   pipe_select(gpu, clock, 0);  in pipe_perf_reg_read()
    86   lockdep_assert_held(&gpu->lock);  in pipe_reg_read()
    89   pipe_select(gpu, clock, i);  in pipe_reg_read()
    90   value += gpu_read(gpu, signal->data);  in pipe_reg_read()
    94   pipe_select(gpu, clock, 0);  in pipe_reg_read()
    110  return gpu_read(gpu, reg);  in hi_total_cycle_read()
    124  return gpu_read(gpu, reg);  in hi_total_idle_cycle_read()
    [all …]
|
| H A D | etnaviv_drv.c |
    85   struct etnaviv_gpu *gpu = priv->gpu[i];  in etnaviv_open() local
    88   if (gpu) {  in etnaviv_open()
    112  struct etnaviv_gpu *gpu = priv->gpu[i];  in etnaviv_postclose() local
    114  if (gpu)  in etnaviv_postclose()
    234  gpu = priv->gpu[i];  in show_each_gpu()
    276  gpu = priv->gpu[args->pipe];  in etnaviv_ioctl_get_param()
    277  if (!gpu)  in etnaviv_ioctl_get_param()
    372  gpu = priv->gpu[args->pipe];  in etnaviv_ioctl_wait_fence()
    423  gpu = priv->gpu[args->pipe];  in etnaviv_ioctl_gem_wait()
    451  gpu = priv->gpu[args->pipe];  in etnaviv_ioctl_pm_query_dom()
    [all …]
|
| /linux-6.15/drivers/gpu/drm/msm/ |
| H A D | msm_gpu.c |
    60   if (gpu->core_clk && gpu->fast_rate)  in enable_clk()
    169  ret = gpu->funcs->hw_init(gpu);  in msm_gpu_hw_init()
    207  gpu->funcs->show(gpu, state, &p);  in msm_gpu_devcoredump_read()
    272  state = gpu->funcs->gpu_state_get(gpu);  in msm_gpu_crashstate_capture()
    437  gpu->funcs->recover(gpu);  in recover_worker()
    449  gpu->funcs->submit(gpu, submit);  in recover_worker()
    515  if (!gpu->funcs->progress(gpu, ring))  in made_progress()
    794  gpu->funcs->submit(gpu, submit);  in msm_gpu_submit()
    808  return gpu->funcs->irq(gpu);  in irq_handler()
    948  gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);  in msm_gpu_init()
    [all …]
|
| H A D | msm_gpu_devfreq.c |
    48   gpu->funcs->gpu_set_freq(gpu, opp, df->suspended);  in msm_devfreq_target()
    72   return gpu->funcs->gpu_get_freq(gpu);  in get_freq()
    99   busy_cycles = gpu->funcs->gpu_busy(gpu, &sample_rate);  in msm_devfreq_get_dev_status()
    195  gpu->cooling = NULL;  in msm_devfreq_init()
    220  if (!has_devfreq(gpu))  in msm_devfreq_cleanup()
    232  if (!has_devfreq(gpu))  in msm_devfreq_resume()
    236  df->busy_cycles = gpu->funcs->gpu_busy(gpu, &sample_rate);  in msm_devfreq_resume()
    248  if (!has_devfreq(gpu))  in msm_devfreq_suspend()
    274  if (!has_devfreq(gpu))  in msm_devfreq_boost()
    277  freq = get_freq(gpu);  in msm_devfreq_boost()
    [all …]
|
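msm_devfreq_get_dev_status() samples a monotonically increasing busy-cycle counter via gpu->funcs->gpu_busy() and turns the delta since the previous sample into the busy_time/total_time pair that devfreq's struct devfreq_dev_status expects. A simplified sketch of that bookkeeping, with the struct and the time handling assumed rather than copied from the driver:

#include <linux/ktime.h>
#include <linux/math64.h>
#include <linux/types.h>

/* Hypothetical bookkeeping mirroring the fields visible in the hits. */
struct example_devfreq {
	u64 busy_cycles;	/* counter value at the last sample */
	ktime_t time;		/* timestamp of the last sample */
};

/*
 * Convert a busy-cycle delta into microseconds of "busy time" so it can be
 * compared against the elapsed wall-clock time. sample_rate is the rate (Hz)
 * at which the counter ticks, as reported by the gpu_busy() callback above.
 */
static void example_get_dev_status(struct example_devfreq *df,
				   u64 busy_cycles, u32 sample_rate,
				   u64 *busy_time_us, u64 *total_time_us)
{
	ktime_t now = ktime_get();
	u64 busy_delta = busy_cycles - df->busy_cycles;

	*busy_time_us = div_u64(busy_delta * USEC_PER_SEC, sample_rate);
	*total_time_us = ktime_us_delta(now, df->time);

	/* Remember this sample so the next call only sees the new activity. */
	df->busy_cycles = busy_cycles;
	df->time = now;
}
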
| H A D | msm_gpu.h |
    84   (struct msm_gpu *gpu);
    475  if (rn >= gpu->nr_rings)  in msm_gpu_convert_priority()
    684  mutex_lock(&gpu->lock);  in msm_gpu_crashstate_get()
    686  if (gpu->crashstate) {  in msm_gpu_crashstate_get()
    688  state = gpu->crashstate;  in msm_gpu_crashstate_get()
    691  mutex_unlock(&gpu->lock);  in msm_gpu_crashstate_get()
    698  mutex_lock(&gpu->lock);  in msm_gpu_crashstate_put()
    700  if (gpu->crashstate) {  in msm_gpu_crashstate_put()
    701  if (gpu->funcs->gpu_state_put(gpu->crashstate))  in msm_gpu_crashstate_put()
    702  gpu->crashstate = NULL;  in msm_gpu_crashstate_put()
    [all …]
|
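The msm_gpu.h hits outline a small ownership protocol around the cached crash state: it is only looked up or dropped under gpu->lock, and the cached pointer is cleared once the final reference is released (gpu_state_put() returning true in the fragments above). A sketch of that shape, with the state type and its refcounting assumed:

#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical stand-ins for msm_gpu_state and its refcounting. */
struct example_state {
	struct kref ref;
	/* captured registers, ring contents, ... */
};

struct example_gpu {
	struct mutex lock;
	struct example_state *crashstate;
};

static void example_state_release(struct kref *ref)
{
	kfree(container_of(ref, struct example_state, ref));
}

/* Returns true when the last reference was just dropped. */
static bool example_state_put(struct example_state *state)
{
	return kref_put(&state->ref, example_state_release);
}

static struct example_state *example_crashstate_get(struct example_gpu *gpu)
{
	struct example_state *state = NULL;

	mutex_lock(&gpu->lock);
	if (gpu->crashstate) {
		kref_get(&gpu->crashstate->ref);
		state = gpu->crashstate;
	}
	mutex_unlock(&gpu->lock);

	return state;
}

static void example_crashstate_put(struct example_gpu *gpu)
{
	mutex_lock(&gpu->lock);
	if (gpu->crashstate) {
		/* Drop the cached reference; forget the pointer once it was the last one. */
		if (example_state_put(gpu->crashstate))
			gpu->crashstate = NULL;
	}
	mutex_unlock(&gpu->lock);
}
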
| /linux-6.15/drivers/gpu/drm/msm/adreno/ |
| H A D | a4xx_gpu.c |
    180  return a4xx_idle(gpu);  in a4xx_me_init()
    325  gpu_write(gpu, REG_A4XX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));  in a4xx_hw_init()
    363  a4xx_dump(gpu);  in a4xx_recover()
    368  adreno_recover(gpu);  in a4xx_recover()
    376  DBG("%s", gpu->name);  in a4xx_destroy()
    388  if (!adreno_idle(gpu, gpu->rb[0]))  in a4xx_idle()
    418  msm_gpu_retire(gpu);  in a4xx_irq()
    569  adreno_dump(gpu);  in a4xx_dump()
    658  struct msm_gpu *gpu;  in a4xx_gpu_init() local
    698  if (!gpu->aspace) {  in a4xx_gpu_init()
    [all …]
|
| H A D | a3xx_gpu.c |
    109  return a3xx_idle(gpu);  in a3xx_me_init()
    119  DBG("%s", gpu->name);  in a3xx_hw_init()
    289  gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));  in a3xx_hw_init()
    379  a3xx_dump(gpu);  in a3xx_recover()
    384  adreno_recover(gpu);  in a3xx_recover()
    404  if (!adreno_idle(gpu, gpu->rb[0]))  in a3xx_idle()
    430  msm_gpu_retire(gpu);  in a3xx_irq()
    478  adreno_dump(gpu);  in a3xx_dump()
    545  struct msm_gpu *gpu;  in a3xx_gpu_init() local
    584  if (!gpu->aspace) {  in a3xx_gpu_init()
    [all …]
|
| H A D | adreno_gpu.h |
    294  return gpu->chip_id & 0xff;  in adreno_patchid()
    299  if (WARN_ON_ONCE(!gpu->info))  in adreno_is_revn()
    306  return gpu->gmu_is_wrapper;  in adreno_has_gmu_wrapper()
    311  if (WARN_ON_ONCE(!gpu->info))  in adreno_is_a2xx()
    318  if (WARN_ON_ONCE(!gpu->info))  in adreno_is_a20x()
    362  return adreno_is_a330(gpu) && (adreno_patchid(gpu) > 0);  in adreno_is_a330v2()
    437  return adreno_is_a619(gpu) && adreno_has_gmu_wrapper(gpu);  in adreno_is_a619_holi()
    497  if (WARN_ON_ONCE(!gpu->info))  in adreno_is_a610_family()
    501  return adreno_is_a610(gpu) || adreno_is_a702(gpu);  in adreno_is_a610_family()
    507  return adreno_is_a618(gpu) ||  in adreno_is_a615_family()
    [all …]
|
| H A D | a5xx_gpu.c |
    932   gpu_write64(gpu, REG_A5XX_CP_RB_BASE, gpu->rb[0]->iova);  in a5xx_hw_init()
    972   a5xx_flush(gpu, gpu->rb[0], true);  in a5xx_hw_init()
    973   if (!a5xx_idle(gpu, gpu->rb[0]))  in a5xx_hw_init()
    990   a5xx_flush(gpu, gpu->rb[0], true);  in a5xx_hw_init()
    991   if (!a5xx_idle(gpu, gpu->rb[0]))  in a5xx_hw_init()
    1025  a5xx_dump(gpu);  in a5xx_recover()
    1234  struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);  in a5xx_fault_detect_irq()
    1258  kthread_queue_work(gpu->worker, &gpu->recover_work);  in a5xx_fault_detect_irq()
    1381  gpu->name,  in a5xx_pm_resume()
    1392  gpu->name);  in a5xx_pm_resume()
    [all …]
|
| H A D | a5xx_power.c |
    164  gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));  in a530_lm_setup()
    165  gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);  in a530_lm_setup()
    199  gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));  in a540_lm_setup()
    200  gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);  in a540_lm_setup()
    247  gpu->name);  in a5xx_gpmu_init()
    264  gpu->name);  in a5xx_gpmu_init()
    271  gpu->name, val);  in a5xx_gpmu_init()
    306  a530_lm_setup(gpu);  in a5xx_power_init()
    308  a540_lm_setup(gpu);  in a5xx_power_init()
    311  a5xx_pc_init(gpu);  in a5xx_power_init()
    [all …]
|
| H A D | a6xx_gpu.c |
    1306  gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova);  in hw_init()
    1364  a6xx_flush(gpu, gpu->rb[0]);  in hw_init()
    1365  if (!a6xx_idle(gpu, gpu->rb[0]))  in hw_init()
    1437  a6xx_dump(gpu);  in a6xx_recover()
    1679  struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);  in a6xx_fault_detect_irq()
    1711  kthread_queue_work(gpu->worker, &gpu->recover_work);  in a6xx_fault_detect_irq()
    1731  kthread_queue_work(gpu->worker, &gpu->recover_work);  in a7xx_sw_fuse_violation_irq()
    2072  ret = clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);  in a6xx_pm_resume()
    2141  clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);  in a6xx_pm_suspend()
    2547  msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,  in a6xx_gpu_init()
    [all …]
|
| H A D | a2xx_gpu.c |
    105  return a2xx_idle(gpu);  in a2xx_me_init()
    118  DBG("%s", gpu->name);  in a2xx_hw_init()
    219  gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));  in a2xx_hw_init()
    279  a2xx_dump(gpu);  in a2xx_recover()
    284  adreno_recover(gpu);  in a2xx_recover()
    302  if (!adreno_idle(gpu, gpu->rb[0]))  in a2xx_idle()
    351  msm_gpu_retire(gpu);  in a2xx_irq()
    452  adreno_dump(gpu);  in a2xx_dump()
    520  struct msm_gpu *gpu;  in a2xx_gpu_init() local
    554  if (!gpu->aspace) {  in a2xx_gpu_init()
    [all …]
|
| H A D | a5xx_preempt.c |
    68   empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));  in get_next_ring()
    90   kthread_queue_work(gpu->worker, &gpu->recover_work);  in a5xx_preempt_timer()
    101  if (gpu->nr_rings == 1)  in a5xx_preempt_trigger()
    119  ring = get_next_ring(gpu);  in a5xx_preempt_trigger()
    197  gpu->name);  in a5xx_preempt_irq()
    198  kthread_queue_work(gpu->worker, &gpu->recover_work);  in a5xx_preempt_irq()
    213  a5xx_preempt_trigger(gpu);  in a5xx_preempt_irq()
    226  if (gpu->nr_rings == 1)  in a5xx_preempt_hw_init()
    310  if (gpu->nr_rings <= 1)  in a5xx_preempt_init()
    319  a5xx_preempt_fini(gpu);  in a5xx_preempt_init()
    [all …]
|
| H A D | a6xx_preempt.c |
    76   empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));  in get_next_ring()
    98   kthread_queue_work(gpu->worker, &gpu->recover_work);  in a6xx_preempt_timer()
    164  gpu->name);  in a6xx_preempt_irq()
    165  kthread_queue_work(gpu->worker, &gpu->recover_work);  in a6xx_preempt_irq()
    194  if (gpu->nr_rings == 1)  in a6xx_preempt_hw_init()
    232  if (gpu->nr_rings == 1)  in a6xx_preempt_trigger()
    304  gpu_write64(gpu,  in a6xx_preempt_trigger()
    308  gpu_write64(gpu,  in a6xx_preempt_trigger()
    417  if (gpu->nr_rings <= 1)  in a6xx_preempt_init()
    449  a6xx_preempt_fini(gpu);  in a6xx_preempt_init()
    [all …]
|
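In both a5xx_preempt.c and a6xx_preempt.c, get_next_ring() decides whether a ring still has work by comparing its software write pointer with the hardware read pointer: equal pointers mean the ring is drained and preemption can skip it. A hedged sketch of that selection loop, with the ring structure and pointer accessors assumed (the real code reads rptr through gpu->funcs->get_rptr() and takes the ring lock):

#include <linux/types.h>

struct example_ring {
	u32 wptr;	/* next write position (software) */
	u32 rptr;	/* last position consumed by the GPU (hardware) */
};

struct example_gpu {
	int nr_rings;
	struct example_ring *rings[4];
};

/*
 * Rings are ordered highest priority first; return the first one that still
 * has commands the GPU has not consumed, or NULL if everything is idle.
 */
static struct example_ring *example_get_next_ring(struct example_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++) {
		struct example_ring *ring = gpu->rings[i];
		bool empty = (ring->wptr == ring->rptr);

		if (!empty)
			return ring;
	}

	return NULL;
}
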
| H A D | adreno_device.c |
    73   if (!gpu) {  in adreno_load_gpu()
    91   ret = gpu->funcs->ucode_load(gpu);  in adreno_load_gpu()
    121  gpu->funcs->debugfs_init(gpu, dev->primary);  in adreno_load_gpu()
    122  gpu->funcs->debugfs_init(gpu, dev->render);  in adreno_load_gpu()
    126  return gpu;  in adreno_load_gpu()
    213  if (IS_ERR(gpu)) {  in adreno_bind()
    234  gpu->funcs->destroy(gpu);  in adreno_unbind()
    301  return gpu->funcs->pm_resume(gpu);  in adreno_runtime_resume()
    315  return gpu->funcs->pm_suspend(gpu);  in adreno_runtime_suspend()
    356  if (!gpu)  in adreno_system_suspend()
    [all …]
|
| H A D | a5xx_debugfs.c |
    23   gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA));  in pfp_print()
    36   gpu_read(gpu, REG_A5XX_CP_ME_STAT_DATA));  in me_print()
    49   gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA));  in meq_print()
    79   show(priv->gpu, &p);  in show()
    97   struct msm_gpu *gpu = priv->gpu;  in reset_set() local
    110  mutex_lock(&gpu->lock);  in reset_set()
    130  gpu->needs_hw_init = true;  in reset_set()
    132  pm_runtime_get_sync(&gpu->pdev->dev);  in reset_set()
    133  gpu->funcs->recover(gpu);  in reset_set()
    135  pm_runtime_put_sync(&gpu->pdev->dev);  in reset_set()
    [all …]
|
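reset_set() is the debugfs hook that forces a GPU reset: it marks the hardware as needing re-initialization and invokes the per-chip recover() callback while holding a runtime-PM reference so the device stays powered across the reset. A sketch of that sequence under gpu->lock; the surrounding structures are assumed, and error handling (e.g. checking pm_runtime_get_sync()) is omitted:

#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>

struct example_gpu;

struct example_gpu_funcs {
	void (*recover)(struct example_gpu *gpu);
};

struct example_gpu {
	struct mutex lock;
	bool needs_hw_init;
	struct platform_device *pdev;
	const struct example_gpu_funcs *funcs;
};

static void example_force_reset(struct example_gpu *gpu)
{
	mutex_lock(&gpu->lock);

	/* Make the next submission path run the full hw_init sequence again. */
	gpu->needs_hw_init = true;

	/* Keep the device powered while the recover callback pokes registers. */
	pm_runtime_get_sync(&gpu->pdev->dev);
	gpu->funcs->recover(gpu);
	pm_runtime_put_sync(&gpu->pdev->dev);

	mutex_unlock(&gpu->lock);
}
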
| H A D | adreno_gpu.c |
    265   gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);  in adreno_fault_handler()
    303   kthread_queue_work(gpu->worker, &gpu->fault_work);  in adreno_fault_handler()
    582   VERB("%s", gpu->name);  in adreno_hw_init()
    621   return gpu->funcs->get_rptr(gpu, ring);  in get_rptr()
    626   return gpu->rb[0];  in adreno_active_ring()
    637   gpu->funcs->pm_suspend(gpu);  in adreno_recover()
    638   gpu->funcs->pm_resume(gpu);  in adreno_recover()
    1011  struct msm_gpu *gpu)  in adreno_get_pwrlevels() argument
    1018  gpu->fast_rate = 0;  in adreno_get_pwrlevels()
    1115  gpu->pdev = pdev;  in adreno_gpu_init()
    [all …]
|
| H A D | a6xx_gpu_state.c |
    135  SZ_1M, MSM_BO_WC, gpu->aspace,  in a6xx_crashdumper_init()
    227  gpu_write(gpu, ctrl0, reg);  in vbif_debugbus_read()
    230  gpu_write(gpu, ctrl1, i);  in vbif_debugbus_read()
    276  ptr += vbif_debugbus_read(gpu,  in a6xx_get_vbif_debugbus_block()
    282  ptr += vbif_debugbus_read(gpu,  in a6xx_get_vbif_debugbus_block()
    291  ptr += vbif_debugbus_read(gpu,  in a6xx_get_vbif_debugbus_block()
    352  a6xx_get_debugbus_block(gpu,  in a6xx_get_debugbus_blocks()
    375  a6xx_get_debugbus_block(gpu,  in a6xx_get_debugbus_blocks()
    416  a6xx_get_debugbus_block(gpu,  in a7xx_get_debugbus_blocks()
    422  a6xx_get_debugbus_block(gpu,  in a7xx_get_debugbus_blocks()
    [all …]
|
| /linux-6.15/drivers/gpu/drm/ci/xfails/ |
| H A D | msm-sm8350-hdk-skips.txt |
    23  # [ 200.895243] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=PERMISS…
    24  # [ 200.906885] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
    25  # [ 200.917625] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
    26  # [ 200.928353] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
    27  # [ 200.939084] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
    28  # [ 200.949815] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
    30  # [ 200.960467] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
    31  # [ 200.960500] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
    32  # [ 200.995966] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
    94  # [ 228.161164] watchdog: BUG: soft lockup - CPU#0 stuck for 26s! [gpu-worker:150]
    [all …]
|
| /linux-6.15/Documentation/gpu/ |
| H A D | drm-kms-helpers.rst |
    151  .. kernel-doc:: drivers/gpu/drm/drm_bridge.c
    157  .. kernel-doc:: drivers/gpu/drm/drm_bridge.c
    163  .. kernel-doc:: drivers/gpu/drm/drm_bridge.c
    169  .. kernel-doc:: drivers/gpu/drm/drm_bridge.c
    212  .. kernel-doc:: drivers/gpu/drm/drm_panel.c
    218  .. kernel-doc:: drivers/gpu/drm/drm_panel.c
    363  .. kernel-doc:: drivers/gpu/drm/drm_edid.c
    369  .. kernel-doc:: drivers/gpu/drm/drm_eld.c
    407  .. kernel-doc:: drivers/gpu/drm/drm_rect.c
    434  .. kernel-doc:: drivers/gpu/drm/drm_of.c
    [all …]
|
| /linux-6.15/drivers/gpu/drm/panthor/ |
| H A D | panthor_gpu.c |
    165  spin_lock(&ptdev->gpu->reqs_lock);  in panthor_gpu_irq_handler()
    167  ptdev->gpu->pending_reqs &= ~status;  in panthor_gpu_irq_handler()
    168  wake_up_all(&ptdev->gpu->reqs_acked);  in panthor_gpu_irq_handler()
    170  spin_unlock(&ptdev->gpu->reqs_lock);  in panthor_gpu_irq_handler()
    188  ptdev->gpu->pending_reqs = 0;  in panthor_gpu_unplug()
    189  wake_up_all(&ptdev->gpu->reqs_acked);  in panthor_gpu_unplug()
    201  struct panthor_gpu *gpu;  in panthor_gpu_init() local
    205  gpu = drmm_kzalloc(&ptdev->base, sizeof(*gpu), GFP_KERNEL);  in panthor_gpu_init()
    206  if (!gpu)  in panthor_gpu_init()
    209  spin_lock_init(&gpu->reqs_lock);  in panthor_gpu_init()
    [all …]
|
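panthor_gpu_irq_handler() acknowledges completed GPU requests by clearing their bits from a pending mask under a spinlock and waking everyone sleeping on the reqs_acked wait queue; panthor_gpu_unplug() clears all outstanding bits the same way so waiters are not left hanging on device removal. A minimal sketch of that wait/ack protocol, with the surrounding panthor structures assumed:

#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

struct example_gpu {
	spinlock_t reqs_lock;
	u32 pending_reqs;		/* one bit per request still in flight */
	wait_queue_head_t reqs_acked;
};

static void example_gpu_init(struct example_gpu *gpu)
{
	spin_lock_init(&gpu->reqs_lock);
	init_waitqueue_head(&gpu->reqs_acked);
	gpu->pending_reqs = 0;
}

/* Called from the interrupt handler with the acknowledged status bits. */
static void example_gpu_ack(struct example_gpu *gpu, u32 status)
{
	spin_lock(&gpu->reqs_lock);
	gpu->pending_reqs &= ~status;
	wake_up_all(&gpu->reqs_acked);
	spin_unlock(&gpu->reqs_lock);
}

/* A requester sleeps until its bit has been cleared by the handler. */
static void example_gpu_wait_req(struct example_gpu *gpu, u32 req_bit)
{
	wait_event(gpu->reqs_acked, !(READ_ONCE(gpu->pending_reqs) & req_bit));
}
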