/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"

#include "amdgpu_reset.h"
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}
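
/*
 * Note on the mailbox control bytes (inferred from the accessors in this
 * file, not from a register spec): bit 0 of a control byte appears to be
 * the MSG_VALID flag and bit 1 the MSG_ACK flag, so writing 2 to the RCV
 * byte acks a received message, and writing 1 to the TRN byte marks an
 * outgoing message as valid.
 */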

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the
 * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has
 * already been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg is not guaranteed to return
 * the correct value, since it does not check that RCV_MSG_VALID has been
 * set by the host before reading RCV_DW0.
 */
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}


static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	int r = 0;
	u32 reg;

	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
	if (reg == IDH_FAIL)
		r = -EINVAL;
	else if (reg != event)
		return -ENOENT;

	xgpu_nv_mailbox_send_ack(adev);

	return r;
}

static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	dev_err(adev->dev, "Did not get TRN_MSG_ACK from pf within %d ms\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r;
	uint64_t timeout, now;

	now = (uint64_t)ktime_to_ms(ktime_get());
	timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_nv_mailbox_rcv_msg(adev, event);
		if (!r) {
			dev_dbg(adev->dev, "rcv_msg 0x%x after %llu ms\n", event, NV_MAILBOX_POLL_MSG_TIMEDOUT - timeout + now);
			return 0;
		}

		msleep(10);
		now = (uint64_t)ktime_to_ms(ktime_get());
	} while (timeout > now);

	dev_dbg(adev->dev, "nv_poll_msg timed out\n");

	return -ETIME;
}
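
/*
 * Polling note: xgpu_nv_poll_msg() checks the mailbox every 10 ms until the
 * NV_MAILBOX_POLL_MSG_TIMEDOUT deadline. On success the message has already
 * been acked inside xgpu_nv_mailbox_rcv_msg(), so the caller does not need
 * to ack it again.
 */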

static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
	      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID to clear the host's RCV_MSG_VALID; once that
	 * is cleared, the hardware automatically clears the host's
	 * RCV_MSG_ACK, which in turn clears the VF's TRN_MSG_ACK. Otherwise
	 * the xgpu_nv_poll_ack() below would return immediately.
	 */
	do {
		xgpu_nv_mailbox_set_valid(adev, false);
		trn = xgpu_nv_peek_ack(adev);
		if (trn) {
			dev_err_ratelimited(adev->dev, "trn=%x ACK should not be asserted, waiting again!\n", trn);
			msleep(1);
		}
	} while (trn);

	dev_dbg(adev->dev, "trans_msg req = 0x%x, data1 = 0x%x\n", req, data1);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
	xgpu_nv_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_nv_poll_ack(adev);
	if (r)
		dev_err(adev->dev, "Did not get ack from pf, continuing\n");

	xgpu_nv_mailbox_set_valid(adev, false);
}
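
/*
 * Summary of the VF -> PF transmit handshake implemented above: clear
 * TRN_MSG_VALID and wait for the previous TRN_MSG_ACK to deassert, write
 * the request and payload into TRN_DW0..TRN_DW3, set TRN_MSG_VALID, poll
 * for the host's ack, then clear TRN_MSG_VALID again to complete the
 * exchange.
 */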

static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
	enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	int r, retry = 1;
	enum idh_event event = -1;

send_request:
	xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3);

	switch (req) {
	case IDH_REQ_GPU_INIT_ACCESS:
	case IDH_REQ_GPU_FINI_ACCESS:
	case IDH_REQ_GPU_RESET_ACCESS:
		event = IDH_READY_TO_ACCESS_GPU;
		break;
	case IDH_REQ_GPU_INIT_DATA:
		event = IDH_REQ_GPU_INIT_DATA_READY;
		break;
	case IDH_RAS_POISON:
		if (data1 != 0)
			event = IDH_RAS_POISON_READY;
		break;
	case IDH_REQ_RAS_ERROR_COUNT:
		event = IDH_RAS_ERROR_COUNT_READY;
		break;
	case IDH_REQ_RAS_CPER_DUMP:
		event = IDH_RAS_CPER_DUMP_READY;
		break;
	default:
		break;
	}

	if (event != -1) {
		r = xgpu_nv_poll_msg(adev, event);
		if (r) {
			if (retry++ < 5)
				goto send_request;

			if (req != IDH_REQ_GPU_INIT_DATA) {
				dev_err(adev->dev, "Did not get msg %d from pf, error=%d\n", event, r);
				return r;
			} else /* host doesn't support REQ_GPU_INIT_DATA handshake */
				adev->virt.req_init_data_ver = 0;
		} else {
			if (req == IDH_REQ_GPU_INIT_DATA) {
				adev->virt.req_init_data_ver =
					RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);

				/* assume V1 in case host doesn't set version number */
				if (adev->virt.req_init_data_ver < 1)
					adev->virt.req_init_data_ver = 1;
			}
		}

		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
		}
	}

	return 0;
}

static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	return xgpu_nv_send_access_requests_with_param(adev,
						req, 0, 0, 0);
}
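
/*
 * Each request above is paired with a readiness event that
 * xgpu_nv_send_access_requests_with_param() polls for: e.g. the GPU
 * INIT/FINI/RESET access requests complete with IDH_READY_TO_ACCESS_GPU,
 * while IDH_RAS_POISON only waits for IDH_RAS_POISON_READY when a nonzero
 * RAS block is passed in data1. Requests with no matching event are
 * fire-and-forget; only the transmit ack in trans_msg() is awaited.
 */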

static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
	int ret, i = 0;

	while (i < NV_MAILBOX_POLL_MSG_REP_MAX) {
		ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
		if (!ret)
			break;
		i++;
	}

	return ret;
}

static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_nv_send_access_requests(adev, req);

	return r;
}

static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}

static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	dev_dbg(adev->dev, "get ack intr and do nothing.\n");
	return 0;
}

static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 2;
	else
		tmp &= ~2;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

static void xgpu_nv_ready_to_reset(struct amdgpu_device *adev)
{
	xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
}

static int xgpu_nv_wait_reset(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;
	do {
		if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) {
			dev_dbg(adev->dev, "Got NV IDH_FLR_NOTIFICATION_CMPL after %d ms\n", NV_MAILBOX_POLL_FLR_TIMEDOUT - timeout);
			return 0;
		}
		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	dev_dbg(adev->dev, "timed out waiting for NV IDH_FLR_NOTIFICATION_CMPL\n");
	return -ETIME;
}
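
/*
 * Reset flow as seen from the VF: xgpu_nv_ready_to_reset() notifies the
 * host that the guest is ready for FLR, and xgpu_nv_wait_reset() then
 * polls the mailbox for IDH_FLR_NOTIFICATION_CMPL for up to
 * NV_MAILBOX_POLL_FLR_TIMEDOUT ms.
 */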

static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

	amdgpu_virt_fini_data_exchange(adev);

	/* Trigger recovery for a world switch failure if no TDR will catch
	 * it, i.e. no job is currently running or at least one engine
	 * timeout is set to infinite.
	 */
	if (amdgpu_device_should_recover_gpu(adev)
		&& (!amdgpu_device_has_job_running(adev) ||
		adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) {
		struct amdgpu_reset_context reset_context;
		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
		set_bit(AMDGPU_HOST_FLR, &reset_context.flags);

		amdgpu_device_gpu_recover(adev, NULL, &reset_context);
	}
}

static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 1;
	else
		tmp &= ~1;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
								&adev->virt.flr_work),
				  "Failed to queue work! at %s",
				  __func__);
		break;
	/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
	 * ignore it here since the polling thread will handle it; other
	 * messages such as FLR complete are likewise not handled here.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
	.set = xgpu_nv_set_mailbox_ack_irq,
	.process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
	.set = xgpu_nv_set_mailbox_rcv_irq,
	.process = xgpu_nv_mailbox_rcv_irq,
};

void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}

int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);

	return 0;
}

void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev,
				       enum amdgpu_ras_block block)
{
	if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
		xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
	} else {
		amdgpu_virt_fini_data_exchange(adev);
		xgpu_nv_send_access_requests_with_param(adev,
				IDH_RAS_POISON, block, 0, 0);
	}
}
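
/*
 * For UMC IP versions below 12.0.0 the poison notification carries no
 * payload; on newer versions the VF/PF data exchange is torn down first
 * and the affected RAS block is reported to the host in data1.
 */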

static bool xgpu_nv_rcvd_ras_intr(struct amdgpu_device *adev)
{
	enum idh_event msg = xgpu_nv_mailbox_peek_msg(adev);

	return (msg == IDH_RAS_ERROR_DETECTED || msg == 0xFFFFFFFF);
}

static int xgpu_nv_req_ras_err_count(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_ERROR_COUNT);
}

static int xgpu_nv_req_ras_cper_dump(struct amdgpu_device *adev, u64 vf_rptr)
{
	uint32_t vf_rptr_hi, vf_rptr_lo;

	vf_rptr_hi = (uint32_t)(vf_rptr >> 32);
	vf_rptr_lo = (uint32_t)(vf_rptr & 0xFFFFFFFF);
	return xgpu_nv_send_access_requests_with_param(
		adev, IDH_REQ_RAS_CPER_DUMP, vf_rptr_hi, vf_rptr_lo, 0);
}
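
/*
 * The 64-bit CPER ring read pointer is split into high and low dwords and
 * passed to the host as data1/data2 of the IDH_REQ_RAS_CPER_DUMP request.
 */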

const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
	.req_full_gpu = xgpu_nv_request_full_gpu_access,
	.rel_full_gpu = xgpu_nv_release_full_gpu_access,
	.req_init_data = xgpu_nv_request_init_data,
	.reset_gpu = xgpu_nv_request_reset,
	.ready_to_reset = xgpu_nv_ready_to_reset,
	.wait_reset = xgpu_nv_wait_reset,
	.trans_msg = xgpu_nv_mailbox_trans_msg,
	.ras_poison_handler = xgpu_nv_ras_poison_handler,
	.rcvd_ras_intr = xgpu_nv_rcvd_ras_intr,
	.req_ras_err_count = xgpu_nv_req_ras_err_count,
	.req_ras_cper_dump = xgpu_nv_req_ras_cper_dump,
};