/* $NetBSD: amdgpu_vega10_ih.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $ */

/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_vega10_ih.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $");

#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "soc15.h"

#include "oss/osssys_4_0_offset.h"
#include "oss/osssys_4_0_sh_mask.h"

#include "soc15_common.h"
#include "vega10_ih.h"

#define MAX_REARM_RETRY 10

static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev);

/**
 * vega10_ih_enable_interrupts - Enable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Enable the interrupt ring buffer (VEGA10).
 */
static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
	if (amdgpu_sriov_vf(adev)) {
		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
			DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
			return;
		}
	} else {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
	}
	adev->irq.ih.enabled = true;

	if (adev->irq.ih1.ring_size) {
		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
					   RB_ENABLE, 1);
		if (amdgpu_sriov_vf(adev)) {
			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
					    ih_rb_cntl)) {
				DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
				return;
			}
		} else {
			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
		}
		adev->irq.ih1.enabled = true;
	}

	if (adev->irq.ih2.ring_size) {
		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
					   RB_ENABLE, 1);
		if (amdgpu_sriov_vf(adev)) {
			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
					    ih_rb_cntl)) {
				DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
				return;
			}
		} else {
			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
		}
		adev->irq.ih2.enabled = true;
	}
}

/**
 * vega10_ih_disable_interrupts - Disable the interrupt ring buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Disable the interrupt ring buffer (VEGA10).
 */
static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);

	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
	if (amdgpu_sriov_vf(adev)) {
		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
			DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
			return;
		}
	} else {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
	}

	/* set rptr, wptr to 0 */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;

	if (adev->irq.ih1.ring_size) {
		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
					   RB_ENABLE, 0);
		if (amdgpu_sriov_vf(adev)) {
			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
					    ih_rb_cntl)) {
				DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
				return;
			}
		} else {
			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
		}
		/* set rptr, wptr to 0 */
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
		adev->irq.ih1.enabled = false;
		adev->irq.ih1.rptr = 0;
	}

	if (adev->irq.ih2.ring_size) {
		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
					   RB_ENABLE, 0);
		if (amdgpu_sriov_vf(adev)) {
			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
					    ih_rb_cntl)) {
				DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
				return;
			}
		} else {
			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
		}

		/* set rptr, wptr to 0 */
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
		adev->irq.ih2.enabled = false;
		adev->irq.ih2.rptr = 0;
	}
}

static uint32_t vega10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
{
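	/* RB_SIZE is the log2 of the ring size in dwords; ring_size is in bytes */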
	int rb_bufsz = order_base_2(ih->ring_size / 4);

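	/*
	 * MC_SPACE selects the address space the ring buffer lives in:
	 * 1 for system memory (bus address), 4 for VRAM.
	 */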
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   MC_SPACE, ih->use_bus_addr ? 1 : 4);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_OVERFLOW_CLEAR, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_OVERFLOW_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
	/* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
	 * value is written to memory
	 */
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
				   WPTR_WRITEBACK_ENABLE, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);

	return ih_rb_cntl;
}

static uint32_t vega10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
{
	u32 ih_doorbell_rtpr = 0;

	if (ih->use_doorbell) {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR, OFFSET,
						 ih->doorbell_index);
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR,
						 ENABLE, 1);
	} else {
		ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
						 IH_DOORBELL_RPTR,
						 ENABLE, 0);
	}
	return ih_doorbell_rtpr;
}

/**
 * vega10_ih_irq_init - init and enable the interrupt ring
 *
 * @adev: amdgpu_device pointer
 *
 * Program the interrupt ring buffer registers, disable interrupts,
 * then set up and enable the IH ring buffers (VEGA10).
 * Called at device load and resume.
 * Returns 0 for success, errors for failure.
 */
static int vega10_ih_irq_init(struct amdgpu_device *adev)
{
	struct amdgpu_ih_ring *ih;
	u32 ih_rb_cntl, ih_chicken;
	int ret = 0;
	u32 tmp;

	/* disable irqs */
	vega10_ih_disable_interrupts(adev);

	adev->nbio.funcs->ih_control(adev);

	ih = &adev->irq.ih;
	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff);

	ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
	ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
				   !!adev->irq.msi_enabled);
	if (amdgpu_sriov_vf(adev)) {
		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
			DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
			return -ETIMEDOUT;
		}
	} else {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
	}

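	/*
	 * On Arcturus with direct firmware loading and on Renoir, IH_CHICKEN
	 * must also be programmed to match where the ring buffer lives
	 * (system memory vs. VRAM).
	 */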
	if ((adev->asic_type == CHIP_ARCTURUS &&
	     adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
	    adev->asic_type == CHIP_RENOIR) {
		ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
		if (adev->irq.ih.use_bus_addr) {
			ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
						   MC_SPACE_GPA_ENABLE, 1);
		} else {
			ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
						   MC_SPACE_FBPA_ENABLE, 1);
		}
		WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
	}

	/* set the writeback address whether it's enabled or not */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
		     lower_32_bits(ih->wptr_addr));
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI,
		     upper_32_bits(ih->wptr_addr) & 0xFFFF);

	/* set rptr, wptr to 0 */
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);

	WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR,
		     vega10_ih_doorbell_rptr(ih));

	ih = &adev->irq.ih1;
	if (ih->ring_size) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1,
			     (ih->gpu_addr >> 40) & 0xff);

		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
		ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
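		/*
		 * Ring 1 must not overflow and wrap; with RB_FULL_DRAIN_ENABLE
		 * new writes are drained while the ring is full instead of
		 * overwriting entries that have not been processed yet.
		 */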
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
					   WPTR_OVERFLOW_ENABLE, 0);
		ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
					   RB_FULL_DRAIN_ENABLE, 1);
		if (amdgpu_sriov_vf(adev)) {
			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
					    ih_rb_cntl)) {
				DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
				return -ETIMEDOUT;
			}
		} else {
			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
		}

		/* set rptr, wptr to 0 */
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);

		WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1,
			     vega10_ih_doorbell_rptr(ih));
	}

	ih = &adev->irq.ih2;
	if (ih->ring_size) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
			     (ih->gpu_addr >> 40) & 0xff);

		ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
		ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);

		if (amdgpu_sriov_vf(adev)) {
			if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
					    ih_rb_cntl)) {
				DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
				return -ETIMEDOUT;
			}
		} else {
			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
		}

		/* set rptr, wptr to 0 */
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);

		WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2,
			     vega10_ih_doorbell_rptr(ih));
	}

	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
	tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
			    CLIENT18_IS_STORM_CLIENT, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);

	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
	tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
	WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);

	pci_set_master(adev->pdev);

	/* enable interrupts */
	vega10_ih_enable_interrupts(adev);

	return ret;
}

/**
 * vega10_ih_irq_disable - disable interrupts
 *
 * @adev: amdgpu_device pointer
 *
 * Disable interrupts on the hw (VEGA10).
 */
static void vega10_ih_irq_disable(struct amdgpu_device *adev)
{
	vega10_ih_disable_interrupts(adev);

	/* Wait and acknowledge irq */
	mdelay(1);
}

/**
 * vega10_ih_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to fetch wptr from
 *
 * Get the IH ring buffer wptr from either the register
 * or the writeback memory buffer (VEGA10). Also check for
 * ring buffer overflow and deal with it.
 * Returns the value of the wptr.
 */
static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
			      struct amdgpu_ih_ring *ih)
{
	u32 wptr, reg, tmp;

	wptr = le32_to_cpu(*ih->wptr_cpu);

	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;

	/* Double check that the overflow wasn't already cleared. */

	if (ih == &adev->irq.ih)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
	else if (ih == &adev->irq.ih1)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
	else if (ih == &adev->irq.ih2)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
	else
		BUG();

	wptr = RREG32_NO_KIQ(reg);
	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
		goto out;

	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);

	/* When a ring buffer overflow happens, start parsing interrupts
	 * from the last not-overwritten vector (wptr + 32). Hopefully
	 * this should allow us to catch up.
	 */
	tmp = (wptr + 32) & ih->ptr_mask;
	dev_warn(adev->dev, "IH ring buffer overflow "
		 "(0x%08X, 0x%08X, 0x%08X)\n",
		 wptr, ih->rptr, tmp);
	ih->rptr = tmp;

	if (ih == &adev->irq.ih)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
	else if (ih == &adev->irq.ih1)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
	else if (ih == &adev->irq.ih2)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
	else
		BUG();

	tmp = RREG32_NO_KIQ(reg);
	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
	WREG32_NO_KIQ(reg, tmp);

out:
	return (wptr & ih->ptr_mask);
}

/**
 * vega10_ih_decode_iv - decode an interrupt vector
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to decode from
 * @entry: IV entry to fill in
 *
 * Decodes the interrupt vector at the current rptr
 * position and also advances the position.
 */
static void vega10_ih_decode_iv(struct amdgpu_device *adev,
				struct amdgpu_ih_ring *ih,
				struct amdgpu_iv_entry *entry)
{
	/* wptr/rptr are in bytes! */
	u32 ring_index = ih->rptr >> 2;
	uint32_t dw[8];

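	/* Each IV entry on Vega10 is 256 bits: eight dwords, 32 bytes. */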
	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
	dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
	dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
	dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
	dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);

	entry->client_id = dw[0] & 0xff;
	entry->src_id = (dw[0] >> 8) & 0xff;
	entry->ring_id = (dw[0] >> 16) & 0xff;
	entry->vmid = (dw[0] >> 24) & 0xf;
	entry->vmid_src = (dw[0] >> 31);
	entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
	entry->timestamp_src = dw[2] >> 31;
	entry->pasid = dw[3] & 0xffff;
	entry->pasid_src = dw[3] >> 31;
	entry->src_data[0] = dw[4];
	entry->src_data[1] = dw[5];
	entry->src_data[2] = dw[6];
	entry->src_data[3] = dw[7];

	/* wptr/rptr are in bytes! */
	ih->rptr += 32;
}

/**
 * vega10_ih_irq_rearm - rearm IRQ if lost
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring to rearm
 *
 * Rewrite the doorbell value in case a doorbell write was lost.
 */
static void vega10_ih_irq_rearm(struct amdgpu_device *adev,
				struct amdgpu_ih_ring *ih)
{
	uint32_t reg_rptr = 0;
	uint32_t v = 0;
	uint32_t i = 0;

	if (ih == &adev->irq.ih)
		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
	else if (ih == &adev->irq.ih1)
		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
	else if (ih == &adev->irq.ih2)
		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
	else
		return;

	/* Rearm IRQ / re-write doorbell if doorbell write is lost */
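	/*
	 * If the hardware rptr is still inside the ring but has not reached
	 * the value we published, the doorbell write may have been dropped;
	 * write it again and re-check, up to MAX_REARM_RETRY times.
	 */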
	for (i = 0; i < MAX_REARM_RETRY; i++) {
		v = RREG32_NO_KIQ(reg_rptr);
		if ((v < ih->ring_size) && (v != ih->rptr))
			WDOORBELL32(ih->doorbell_index, ih->rptr);
		else
			break;
	}
}

/**
 * vega10_ih_set_rptr - set the IH ring buffer rptr
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to set rptr for
 *
 * Set the IH ring buffer rptr.
 */
static void vega10_ih_set_rptr(struct amdgpu_device *adev,
			       struct amdgpu_ih_ring *ih)
{
	if (ih->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		*ih->rptr_cpu = ih->rptr;
		WDOORBELL32(ih->doorbell_index, ih->rptr);

		if (amdgpu_sriov_vf(adev))
			vega10_ih_irq_rearm(adev, ih);
	} else if (ih == &adev->irq.ih) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
	} else if (ih == &adev->irq.ih1) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr);
	} else if (ih == &adev->irq.ih2) {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr);
	}
}

/**
 * vega10_ih_self_irq - dispatch work for ring 1 and 2
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: IV with WPTR update
 *
 * Update the WPTR from the IV and schedule work to handle the entries.
 */
static int vega10_ih_self_irq(struct amdgpu_device *adev,
			      struct amdgpu_irq_src *source,
			      struct amdgpu_iv_entry *entry)
{
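	/*
	 * The write pointer for ring 1 and 2 arrives as an IV on the main
	 * ring rather than through their writeback slots; stash it and
	 * defer processing to the per-ring work item.
	 */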
	uint32_t wptr = cpu_to_le32(entry->src_data[0]);

	switch (entry->ring_id) {
	case 1:
		*adev->irq.ih1.wptr_cpu = wptr;
		schedule_work(&adev->irq.ih1_work);
		break;
	case 2:
		*adev->irq.ih2.wptr_cpu = wptr;
		schedule_work(&adev->irq.ih2_work);
		break;
	default: break;
	}
	return 0;
}

static const struct amdgpu_irq_src_funcs vega10_ih_self_irq_funcs = {
	.process = vega10_ih_self_irq,
};

static void vega10_ih_set_self_irq_funcs(struct amdgpu_device *adev)
{
	adev->irq.self_irq.num_types = 0;
	adev->irq.self_irq.funcs = &vega10_ih_self_irq_funcs;
}

static int vega10_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vega10_ih_set_interrupt_funcs(adev);
	vega10_ih_set_self_irq_funcs(adev);
	return 0;
}

static int vega10_ih_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
			      &adev->irq.self_irq);
	if (r)
		return r;

	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true);
	if (r)
		return r;

	adev->irq.ih.use_doorbell = true;
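	/*
	 * The assigned doorbell slots are 64-bit wide; shift left by one to
	 * get the 32-bit dword index the IH doorbell expects.
	 */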
	adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;

	r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
	if (r)
		return r;

	adev->irq.ih1.use_doorbell = true;
	adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;

	r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
	if (r)
		return r;

	adev->irq.ih2.use_doorbell = true;
	adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;

	r = amdgpu_irq_init(adev);

	return r;
}

static int vega10_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);

	return 0;
}

static int vega10_ih_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vega10_ih_irq_init(adev);
	if (r)
		return r;

	return 0;
}

static int vega10_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vega10_ih_irq_disable(adev);

	return 0;
}

static int vega10_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vega10_ih_hw_fini(adev);
}

static int vega10_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vega10_ih_hw_init(adev);
}

static bool vega10_ih_is_idle(void *handle)
{
	/* todo */
	return true;
}

static int vega10_ih_wait_for_idle(void *handle)
{
	/* todo */
	return -ETIMEDOUT;
}

static int vega10_ih_soft_reset(void *handle)
{
	/* todo */

	return 0;
}

static void vega10_ih_update_clockgating_state(struct amdgpu_device *adev,
					       bool enable)
{
	uint32_t data, def, field_val;

	if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
		def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL);
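		/*
		 * Setting a *_CLK_SOFT_OVERRIDE bit forces that clock on and
		 * so disables gating; clearing it lets the clock be gated.
		 */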
		field_val = enable ? 0 : 1;
		/**
		 * Vega10 does not have IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE
		 * and IH_BUFFER_MEM_CLK_SOFT_OVERRIDE fields.
		 */
		if (adev->asic_type > CHIP_VEGA10) {
			data = REG_SET_FIELD(data, IH_CLK_CTRL,
				IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE, field_val);
			data = REG_SET_FIELD(data, IH_CLK_CTRL,
				IH_BUFFER_MEM_CLK_SOFT_OVERRIDE, field_val);
		}

		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     DYN_CLK_SOFT_OVERRIDE, field_val);
		data = REG_SET_FIELD(data, IH_CLK_CTRL,
				     REG_CLK_SOFT_OVERRIDE, field_val);
		if (def != data)
			WREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL, data);
	}
}

static int vega10_ih_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vega10_ih_update_clockgating_state(adev,
					   state == AMD_CG_STATE_GATE);
	return 0;
}

static int vega10_ih_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs vega10_ih_ip_funcs = {
	.name = "vega10_ih",
	.early_init = vega10_ih_early_init,
	.late_init = NULL,
	.sw_init = vega10_ih_sw_init,
	.sw_fini = vega10_ih_sw_fini,
	.hw_init = vega10_ih_hw_init,
	.hw_fini = vega10_ih_hw_fini,
	.suspend = vega10_ih_suspend,
	.resume = vega10_ih_resume,
	.is_idle = vega10_ih_is_idle,
	.wait_for_idle = vega10_ih_wait_for_idle,
	.soft_reset = vega10_ih_soft_reset,
	.set_clockgating_state = vega10_ih_set_clockgating_state,
	.set_powergating_state = vega10_ih_set_powergating_state,
};

static const struct amdgpu_ih_funcs vega10_ih_funcs = {
	.get_wptr = vega10_ih_get_wptr,
	.decode_iv = vega10_ih_decode_iv,
	.set_rptr = vega10_ih_set_rptr
};

static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	adev->irq.ih_funcs = &vega10_ih_funcs;
}

const struct amdgpu_ip_block_version vega10_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 4,
	.minor = 0,
	.rev = 0,
	.funcs = &vega10_ih_ip_funcs,
};