/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_pm.h"
#include "soc15d.h"
#include "soc15_common.h"

#define JPEG_IDLE_TIMEOUT	msecs_to_jiffies(1000)

static void amdgpu_jpeg_idle_work_handler(struct work_struct *work);
static void amdgpu_jpeg_reg_dump_fini(struct amdgpu_device *adev);

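/**
 * amdgpu_jpeg_sw_init - software init of the JPEG block
 * @adev: amdgpu device pointer
 *
 * Sets up the idle work handler, power-gating lock and submission counter,
 * and allocates a per-instance DPG SRAM buffer when indirect SRAM loading
 * is in use (PSP firmware loading with JPEG DPG power gating supported).
 *
 * Returns 0 on success, negative error code on failure.
 */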
int amdgpu_jpeg_sw_init(struct amdgpu_device *adev)
{
	int i, r;

	INIT_DELAYED_WORK(&adev->jpeg.idle_work, amdgpu_jpeg_idle_work_handler);
	mutex_init(&adev->jpeg.jpeg_pg_lock);
	atomic_set(&adev->jpeg.total_submission_cnt, 0);

	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
	    (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG))
		adev->jpeg.indirect_sram = true;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
		if (adev->jpeg.harvest_config & (1U << i))
			continue;

		if (adev->jpeg.indirect_sram) {
			r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_VRAM |
						    AMDGPU_GEM_DOMAIN_GTT,
						    &adev->jpeg.inst[i].dpg_sram_bo,
						    &adev->jpeg.inst[i].dpg_sram_gpu_addr,
						    &adev->jpeg.inst[i].dpg_sram_cpu_addr);
			if (r) {
				dev_err(adev->dev,
					"JPEG %d (%d) failed to allocate DPG bo\n", i, r);
				return r;
			}
		}
	}

	return 0;
}

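/**
 * amdgpu_jpeg_sw_fini - software teardown of the JPEG block
 * @adev: amdgpu device pointer
 *
 * Frees the per-instance DPG SRAM buffers and decode rings, releases the
 * register dump buffer if one was allocated, and destroys the power-gating
 * lock.
 *
 * Returns 0.
 */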
int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1U << i))
			continue;

		amdgpu_bo_free_kernel(
			&adev->jpeg.inst[i].dpg_sram_bo,
			&adev->jpeg.inst[i].dpg_sram_gpu_addr,
			(void **)&adev->jpeg.inst[i].dpg_sram_cpu_addr);

		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j)
			amdgpu_ring_fini(&adev->jpeg.inst[i].ring_dec[j]);
	}

	if (adev->jpeg.reg_list)
		amdgpu_jpeg_reg_dump_fini(adev);

	mutex_destroy(&adev->jpeg.jpeg_pg_lock);

	return 0;
}

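/**
 * amdgpu_jpeg_suspend - JPEG suspend handling
 * @adev: amdgpu device pointer
 *
 * Cancels any pending idle work before the block is suspended.
 *
 * Returns 0.
 */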
int amdgpu_jpeg_suspend(struct amdgpu_device *adev)
{
	cancel_delayed_work_sync(&adev->jpeg.idle_work);

	return 0;
}

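/**
 * amdgpu_jpeg_resume - JPEG resume handling
 * @adev: amdgpu device pointer
 *
 * No device state to restore here; kept for symmetry with
 * amdgpu_jpeg_suspend().
 *
 * Returns 0.
 */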
int amdgpu_jpeg_resume(struct amdgpu_device *adev)
{
	return 0;
}

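/**
 * amdgpu_jpeg_idle_work_handler - power-gate JPEG when idle
 * @work: pointer to the delayed work
 *
 * Counts the fences still emitted on all decode rings; if none remain and
 * no submissions are in flight, gates the JPEG block, otherwise reschedules
 * itself.
 */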
static void amdgpu_jpeg_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, jpeg.idle_work.work);
	unsigned int fences = 0;
	unsigned int i, j;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1U << i))
			continue;

		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j)
			fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec[j]);
	}

	if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt))
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG,
						       AMD_PG_STATE_GATE);
	else
		schedule_delayed_work(&adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT);
}

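/**
 * amdgpu_jpeg_ring_begin_use - power up JPEG before ring use
 * @ring: ring about to be used
 *
 * Bumps the submission counter, cancels the idle work and ungates the
 * JPEG block under the power-gating lock.
 */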
void amdgpu_jpeg_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	atomic_inc(&adev->jpeg.total_submission_cnt);
	cancel_delayed_work_sync(&adev->jpeg.idle_work);

	mutex_lock(&adev->jpeg.jpeg_pg_lock);
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG,
					       AMD_PG_STATE_UNGATE);
	mutex_unlock(&adev->jpeg.jpeg_pg_lock);
}

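/**
 * amdgpu_jpeg_ring_end_use - mark the end of ring use
 * @ring: ring that was used
 *
 * Drops the submission counter and schedules the idle work that will
 * eventually gate the block again.
 */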
void amdgpu_jpeg_ring_end_use(struct amdgpu_ring *ring)
{
	atomic_dec(&ring->adev->jpeg.total_submission_cnt);
	schedule_delayed_work(&ring->adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT);
}

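/**
 * amdgpu_jpeg_dec_ring_test_ring - basic decode ring test
 * @ring: ring to test
 *
 * Primes the pitch register with a dummy value via MMIO, then writes a
 * test pattern to it through a ring packet and polls the register until
 * the pattern shows up. Skipped under SR-IOV, where direct register
 * access is not available.
 *
 * Returns 0 on success, -ETIMEDOUT if the pattern never appears.
 */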
int amdgpu_jpeg_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned int i;
	int r;

	/* JPEG in SRIOV does not support direct register read/write */
	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	WREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch[ring->pipe], 0xCAFEDEAD);
	/* Read the register back to make sure the write has been executed. */
	RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch[ring->pipe]);

	amdgpu_ring_write(ring, PACKET0(adev->jpeg.internal.jpeg_pitch[ring->pipe], 0));
	amdgpu_ring_write(ring, 0xABADCAFE);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch[ring->pipe]);
		if (tmp == 0xABADCAFE)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

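/**
 * amdgpu_jpeg_dec_set_reg - submit an IB that writes the pitch register
 * @ring: ring to submit to
 * @handle: session handle (unused by the packet itself)
 * @fence: optional fence returned for the submission
 *
 * Builds a small direct IB that writes 0xDEADBEEF to the pitch register,
 * padded with type-6 NOP packets.
 *
 * Returns 0 on success, negative error code on failure.
 */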
static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	const unsigned int ib_size_dw = 16;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch[ring->pipe], 0, 0, PACKETJ_TYPE0);
	ib->ptr[1] = 0xDEADBEEF;
	for (i = 2; i < 16; i += 2) {
		ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

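/**
 * amdgpu_jpeg_dec_ring_test_ib - decode indirect buffer test
 * @ring: ring to test
 * @timeout: fence wait timeout in jiffies
 *
 * Submits the register-write IB, waits for its fence and, when not running
 * under SR-IOV, polls the pitch register for the expected value.
 *
 * Returns 0 on success, negative error code on failure or timeout.
 */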
int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned int i;
	struct dma_fence *fence = NULL;
	long r = 0;

	r = amdgpu_jpeg_dec_set_reg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto error;
	} else if (r < 0) {
		goto error;
	} else {
		r = 0;
	}

	if (!amdgpu_sriov_vf(adev)) {
		for (i = 0; i < adev->usec_timeout; i++) {
			tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch[ring->pipe]);
			if (tmp == 0xDEADBEEF)
				break;
			udelay(1);
			if (amdgpu_emu_mode == 1)
				udelay(10);
		}

		if (i >= adev->usec_timeout)
			r = -ETIMEDOUT;
	}

	dma_fence_put(fence);
error:
	return r;
}

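/**
 * amdgpu_jpeg_process_poison_irq - forward a poison interrupt to RAS
 * @adev: amdgpu device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Dispatches the IV entry to the RAS handler registered for the JPEG
 * block, if any.
 *
 * Returns 0.
 */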
int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->jpeg.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;
	amdgpu_ras_interrupt_dispatch(adev, &ih_data);

	return 0;
}

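/**
 * amdgpu_jpeg_ras_late_init - late RAS init for the JPEG block
 * @adev: amdgpu device pointer
 * @ras_block: RAS block descriptor
 *
 * Performs common RAS late init and, when RAS is supported, enables the
 * poison interrupt on every non-harvested instance that registered one.
 *
 * Returns 0 on success, negative error code on failure.
 */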
int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r, i;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
			if (adev->jpeg.harvest_config & (1U << i) ||
			    !adev->jpeg.inst[i].ras_poison_irq.funcs)
				continue;

			r = amdgpu_irq_get(adev, &adev->jpeg.inst[i].ras_poison_irq, 0);
			if (r)
				goto late_fini;
		}
	}
	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}

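/**
 * amdgpu_jpeg_ras_sw_init - register the JPEG RAS block
 * @adev: amdgpu device pointer
 *
 * Registers the JPEG RAS block with the RAS framework, fills in the
 * common block info and hooks up the default late-init callback.
 *
 * Returns 0 on success, negative error code on failure.
 */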
int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_jpeg_ras *ras;

	if (!adev->jpeg.ras)
		return 0;

	ras = adev->jpeg.ras;
	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register jpeg ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "jpeg");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__JPEG;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
	adev->jpeg.ras_if = &ras->ras_block.ras_comm;

	if (!ras->ras_block.ras_late_init)
		ras->ras_block.ras_late_init = amdgpu_jpeg_ras_late_init;

	return 0;
}

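/**
 * amdgpu_jpeg_psp_update_sram - load the DPG SRAM image through PSP
 * @adev: amdgpu device pointer
 * @inst_idx: JPEG instance index
 * @ucode_id: firmware id (currently unused; AMDGPU_UCODE_ID_JPEG_RAM is
 *	      always used)
 *
 * Hands the indirect SRAM buffer written so far to PSP for loading.
 *
 * Returns 0 on success, negative error code on failure.
 */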
int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
				enum AMDGPU_UCODE_ID ucode_id)
{
	struct amdgpu_firmware_info ucode = {
		.ucode_id = AMDGPU_UCODE_ID_JPEG_RAM,
		.mc_addr = adev->jpeg.inst[inst_idx].dpg_sram_gpu_addr,
		.ucode_size = ((uintptr_t)adev->jpeg.inst[inst_idx].dpg_sram_curr_addr -
			       (uintptr_t)adev->jpeg.inst[inst_idx].dpg_sram_cpu_addr),
	};

	return psp_execute_ip_fw_load(&adev->psp, &ucode);
}

/*
 * debugfs to enable/disable JPEG job submission to a specific core.
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_jpeg_sched_mask_set(void *data, u64 val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	u32 i, j;
	u64 mask = 0;
	struct amdgpu_ring *ring;

	if (!adev)
		return -ENODEV;

	mask = (1ULL << (adev->jpeg.num_jpeg_inst * adev->jpeg.num_jpeg_rings)) - 1;
	if ((val & mask) == 0)
		return -EINVAL;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			ring = &adev->jpeg.inst[i].ring_dec[j];
			if (val & (1ULL << ((i * adev->jpeg.num_jpeg_rings) + j)))
				ring->sched.ready = true;
			else
				ring->sched.ready = false;
		}
	}
	/* publish sched.ready flag update effective immediately across smp */
	smp_wmb();
	return 0;
}

static int amdgpu_debugfs_jpeg_sched_mask_get(void *data, u64 *val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	u32 i, j;
	u64 mask = 0;
	struct amdgpu_ring *ring;

	if (!adev)
		return -ENODEV;
	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
			ring = &adev->jpeg.inst[i].ring_dec[j];
			if (ring->sched.ready)
				mask |= 1ULL << ((i * adev->jpeg.num_jpeg_rings) + j);
		}
	}
	*val = mask;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_jpeg_sched_mask_fops,
			 amdgpu_debugfs_jpeg_sched_mask_get,
			 amdgpu_debugfs_jpeg_sched_mask_set, "%llx\n");

#endif

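/**
 * amdgpu_debugfs_jpeg_sched_mask_init - register the sched mask debugfs file
 * @adev: amdgpu device pointer
 *
 * Creates amdgpu_jpeg_sched_mask in debugfs when there is more than one
 * JPEG instance or ring; with a single core there is nothing to mask.
 */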
void amdgpu_debugfs_jpeg_sched_mask_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	if (adev->jpeg.num_jpeg_inst <= 1 && adev->jpeg.num_jpeg_rings <= 1)
		return;
	sprintf(name, "amdgpu_jpeg_sched_mask");
	debugfs_create_file(name, 0600, root, adev,
			    &amdgpu_debugfs_jpeg_sched_mask_fops);
#endif
}

static ssize_t amdgpu_get_jpeg_reset_mask(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (!adev)
		return -ENODEV;

	return amdgpu_show_reset_mask(buf, adev->jpeg.supported_reset);
}

static DEVICE_ATTR(jpeg_reset_mask, 0444,
		   amdgpu_get_jpeg_reset_mask, NULL);

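/**
 * amdgpu_jpeg_sysfs_reset_mask_init - create the jpeg_reset_mask sysfs file
 * @adev: amdgpu device pointer
 *
 * Exposes the supported reset types for the JPEG block.
 *
 * Returns 0 on success, negative error code on failure.
 */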
int amdgpu_jpeg_sysfs_reset_mask_init(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->jpeg.num_jpeg_inst) {
		r = device_create_file(adev->dev, &dev_attr_jpeg_reset_mask);
		if (r)
			return r;
	}

	return r;
}

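/**
 * amdgpu_jpeg_sysfs_reset_mask_fini - remove the jpeg_reset_mask sysfs file
 * @adev: amdgpu device pointer
 *
 * Removes the attribute only while the underlying kobject is still
 * published in sysfs.
 */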
void amdgpu_jpeg_sysfs_reset_mask_fini(struct amdgpu_device *adev)
{
	if (adev->dev->kobj.sd) {
		if (adev->jpeg.num_jpeg_inst)
			device_remove_file(adev->dev, &dev_attr_jpeg_reset_mask);
	}
}

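/**
 * amdgpu_jpeg_reg_dump_init - allocate the register dump buffer
 * @adev: amdgpu device pointer
 * @reg: register list to capture
 * @count: number of registers per instance
 *
 * Allocates one dump slot per register per instance and remembers the
 * register list for later dumps.
 *
 * Returns 0 on success, -ENOMEM if the buffer cannot be allocated.
 */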
int amdgpu_jpeg_reg_dump_init(struct amdgpu_device *adev,
			      const struct amdgpu_hwip_reg_entry *reg, u32 count)
{
	adev->jpeg.ip_dump = kcalloc(adev->jpeg.num_jpeg_inst * count,
				     sizeof(uint32_t), GFP_KERNEL);
	if (!adev->jpeg.ip_dump) {
		DRM_ERROR("Failed to allocate memory for JPEG IP Dump\n");
		return -ENOMEM;
	}
	adev->jpeg.reg_list = reg;
	adev->jpeg.reg_count = count;

	return 0;
}

static void amdgpu_jpeg_reg_dump_fini(struct amdgpu_device *adev)
{
	kfree(adev->jpeg.ip_dump);
	adev->jpeg.ip_dump = NULL;
	adev->jpeg.reg_list = NULL;
	adev->jpeg.reg_count = 0;
}

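/**
 * amdgpu_jpeg_dump_ip_state - capture the JPEG register state
 * @ip_block: IP block to dump
 *
 * Reads the power status register of every non-harvested instance and,
 * when the instance is powered, the rest of the configured register list.
 */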
void amdgpu_jpeg_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	u32 inst_off, inst_id, is_powered;
	int i, j;

	if (!adev->jpeg.ip_dump)
		return;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
		if (adev->jpeg.harvest_config & (1U << i))
			continue;

		inst_id = GET_INST(JPEG, i);
		inst_off = i * adev->jpeg.reg_count;
		/* check power status from UVD_JPEG_POWER_STATUS */
		adev->jpeg.ip_dump[inst_off] =
			RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->jpeg.reg_list[0],
							   inst_id));
		is_powered = ((adev->jpeg.ip_dump[inst_off] & 0x1) != 1);

		if (is_powered)
			for (j = 1; j < adev->jpeg.reg_count; j++)
				adev->jpeg.ip_dump[inst_off + j] =
					RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->jpeg.reg_list[j],
									   inst_id));
	}
}

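/**
 * amdgpu_jpeg_print_ip_state - print a previously captured register dump
 * @ip_block: IP block that was dumped
 * @p: drm printer to emit to
 *
 * Prints the captured registers per instance, skipping harvested
 * instances and marking inactive ones.
 */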
void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
	struct amdgpu_device *adev = ip_block->adev;
	u32 inst_off, is_powered;
	int i, j;

	if (!adev->jpeg.ip_dump)
		return;

	drm_printf(p, "num_instances:%d\n", adev->jpeg.num_jpeg_inst);
	for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
		if (adev->jpeg.harvest_config & (1U << i)) {
			drm_printf(p, "\nHarvested Instance:JPEG%d Skipping dump\n", i);
			continue;
		}

		inst_off = i * adev->jpeg.reg_count;
		is_powered = ((adev->jpeg.ip_dump[inst_off] & 0x1) != 1);

		if (is_powered) {
			drm_printf(p, "Active Instance:JPEG%d\n", i);
			for (j = 0; j < adev->jpeg.reg_count; j++)
				drm_printf(p, "%-50s \t 0x%08x\n", adev->jpeg.reg_list[j].reg_name,
					   adev->jpeg.ip_dump[inst_off + j]);
		} else {
			drm_printf(p, "\nInactive Instance:JPEG%d\n", i);
		}
	}
}