/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "mp/mp_9_0_offset.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"

#include "amdgpu_reset.h"
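
/*
 * VF <-> PF mailbox helpers.  Requests from the VF are written into the
 * BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0..DW3 registers and signalled through the
 * byte-wide TRN control register; messages from the host are read back from
 * the RCV_DW registers and acknowledged through the RCV control byte.
 */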
static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg should *only* be called from the IRQ routine, because in the
 * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL has
 * already been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg is not guaranteed to return the
 * correct value, since it may read RCV_DW0 before the host has set
 * RCV_MSG_VALID.
 */
static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
}

static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}

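/*
 * TRN_MSG_ACK from the host shows up as bit 1 of the byte-wide TRN control
 * register: peek_ack samples it once, while poll_ack busy-waits for it for
 * up to AI_MAILBOX_POLL_ACK_TIMEDOUT msec.
 */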
static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
	int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Didn't get TRN_MSG_ACK from pf in %d msec\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_ai_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		drm_msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	pr_err("Didn't get msg:%d from pf, error=%d\n", event, r);

	return -ETIME;
}

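/*
 * Push one request (plus up to three data words) to the host: clear any
 * stale ack, fill MSGBUF_TRN_DW0..DW3, raise TRN_MSG_VALID, then wait for
 * the host's ack before dropping TRN_MSG_VALID again.
 */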
static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
	      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID to clear the host's RCV_MSG_ACK; once the
	 * host's RCV_MSG_ACK is cleared, hardware automatically clears the
	 * VF's TRN_MSG_ACK.  Otherwise the xgpu_ai_poll_ack() below would
	 * return immediately.
	 */
	do {
		xgpu_ai_mailbox_set_valid(adev, false);
		trn = xgpu_ai_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not assert! wait again!\n", trn);
			drm_msleep(1);
		}
	} while (trn);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
				data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
				data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
				data3);

	xgpu_ai_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		pr_err("Didn't get ack from pf, continue\n");

	xgpu_ai_mailbox_set_valid(adev, false);
}

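/*
 * Send an access/reset/init-data request to the host and, for the request
 * types that expect one, poll for the matching reply.
 */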
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	/* wait for the host's reply when the request expects one */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
		req == IDH_REQ_GPU_FINI_ACCESS ||
		req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Didn't get READY_TO_ACCESS_GPU from pf, give up\n");
			return r;
		}
		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
		}
	} else if (req == IDH_REQ_GPU_INIT_DATA) {
		/* Dummy REQ_GPU_INIT_DATA handling */
		r = xgpu_ai_poll_msg(adev, IDH_REQ_GPU_INIT_DATA_READY);
		/* version set to 0 since dummy */
		adev->virt.req_init_data_ver = 0;
	}

	return 0;
}

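/* Reset requests are retried up to AI_MAILBOX_POLL_MSG_REP_MAX times. */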
static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
	int ret, i = 0;

	while (i < AI_MAILBOX_POLL_MSG_REP_MAX) {
		ret = xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
		if (!ret)
			break;
		i++;
	}

	return ret;
}

static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_ai_send_access_requests(adev, req);

	return r;
}

static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
				(state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

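/*
 * Deferred work run when the host announces a function level reset (FLR)
 * of this VF: it quiesces data exchange, tells the host we are ready,
 * waits for FLR completion and then kicks GPU recovery if needed.
 */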
static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;

	/* block amdgpu_gpu_recover till the FLR COMPLETE msg is received,
	 * otherwise the mailbox msg will be ruined/reset by
	 * the VF FLR.
	 */
	if (atomic_cmpxchg(&adev->reset_domain->in_gpu_reset, 0, 1) != 0)
		return;

	down_write(&adev->reset_domain->sem);

	amdgpu_virt_fini_data_exchange(adev);

	xgpu_ai_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);

	do {
		if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		drm_msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	atomic_set(&adev->reset_domain->in_gpu_reset, 0);
	up_write(&adev->reset_domain->sem);

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& (!amdgpu_device_has_job_running(adev) ||
			adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)) {
		struct amdgpu_reset_context reset_context;

		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

		amdgpu_device_gpu_recover(adev, NULL, &reset_context);
	}
}

static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev))
			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
								&adev->virt.flr_work),
				  "Failed to queue work! at %s",
				  __func__);
		break;
	case IDH_QUERY_ALIVE:
		xgpu_ai_mailbox_send_ack(adev);
		break;
	/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
	 * ignore it here since the polling thread will handle it; other
	 * messages such as FLR complete are not handled here either.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
	.set = xgpu_ai_set_mailbox_ack_irq,
	.process = xgpu_ai_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
	.set = xgpu_ai_set_mailbox_rcv_irq,
	.process = xgpu_ai_mailbox_rcv_irq,
};

void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}

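/*
 * Register the two BIF mailbox interrupt sources: src_id 135 for incoming
 * messages (rcv) and src_id 138 for acks.
 */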
int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

	return 0;
}

void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

static int xgpu_ai_request_init_data(struct amdgpu_device *adev)
{
	return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}

static void xgpu_ai_ras_poison_handler(struct amdgpu_device *adev)
{
	xgpu_ai_send_access_requests(adev, IDH_RAS_POISON);
}

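/* Entry points used by the amdgpu virtualization layer for SR-IOV VFs. */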
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
	.req_full_gpu	= xgpu_ai_request_full_gpu_access,
	.rel_full_gpu	= xgpu_ai_release_full_gpu_access,
	.reset_gpu = xgpu_ai_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_ai_mailbox_trans_msg,
	.req_init_data  = xgpu_ai_request_init_data,
	.ras_poison_handler = xgpu_ai_ras_poison_handler,
};