/*	$NetBSD: amdgpu_vi.c,v 1.1 2018/08/27 14:10:14 riastradh Exp $	*/

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_vi.c,v 1.1 2018/08/27 14:10:14 riastradh Exp $");

#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"

/*
 * Indirect registers accessor
 */
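/*
 * Each accessor below follows the same index/data protocol: take the
 * spin lock guarding the register pair, write the target offset to the
 * INDEX register, then access the DATA register.  The (void)RREG32()
 * read-backs in the PCIE accessors presumably flush the posted index
 * write before the data register is touched.
 */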
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, (reg));
	r = RREG32(mmSMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, (reg));
	WREG32(mmSMC_IND_DATA_0, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX                                                      0x180
#define mmMP0PUB_IND_DATA                                                       0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

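/*
 * Golden register tables: triplets of { register, AND mask, OR value },
 * as consumed by amdgpu_program_register_sequence().  An AND mask of
 * 0xffffffff writes the value verbatim; any other mask is applied
 * read-modify-write.
 */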
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		     u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

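/*
 * Typical usage (cf. gfx_v8_0.c): callers serialize on adev->srbm_mutex
 * and restore the default instance when done:
 *
 *	mutex_lock(&adev->srbm_mutex);
 *	vi_srbm_select(adev, me, pipe, queue, vmid);
 *	...program the instanced registers...
 *	vi_srbm_select(adev, 0, 0, 0, 0);
 *	mutex_unlock(&adev->srbm_mutex);
 */
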
static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}
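
/*
 * Register read whitelists.  Each entry is { reg_offset, untouched
 * [, grbm_indexed] }: "untouched" registers report 0 rather than being
 * read from the hardware, and "grbm_indexed" registers are read through
 * the GRBM_GFX_INDEX se/sh selection (see vi_read_indexed_register).
 */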
static struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
	{mmGB_MACROTILE_MODE7, true},
};

static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
	{mmGB_TILE_MODE7, true},
	{mmGB_TILE_MODE12, true},
	{mmGB_TILE_MODE17, true},
	{mmGB_TILE_MODE23, true},
	{mmGB_MACROTILE_MODE7, true},
};

static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS, false},
	{mmGRBM_STATUS2, false},
	{mmGRBM_STATUS_SE0, false},
	{mmGRBM_STATUS_SE1, false},
	{mmGRBM_STATUS_SE2, false},
	{mmGRBM_STATUS_SE3, false},
	{mmSRBM_STATUS, false},
	{mmSRBM_STATUS2, false},
	{mmSRBM_STATUS3, false},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
	{mmCP_STAT, false},
	{mmCP_STALLED_STAT1, false},
	{mmCP_STALLED_STAT2, false},
	{mmCP_STALLED_STAT3, false},
	{mmCP_CPF_BUSY_STAT, false},
	{mmCP_CPF_STALLED_STAT1, false},
	{mmCP_CPF_STATUS, false},
	{mmCP_CPC_BUSY_STAT, false},
	{mmCP_CPC_STALLED_STAT1, false},
	{mmCP_CPC_STATUS, false},
	{mmGB_ADDR_CONFIG, false},
	{mmMC_ARB_RAMCFG, false},
	{mmGB_TILE_MODE0, false},
	{mmGB_TILE_MODE1, false},
	{mmGB_TILE_MODE2, false},
	{mmGB_TILE_MODE3, false},
	{mmGB_TILE_MODE4, false},
	{mmGB_TILE_MODE5, false},
	{mmGB_TILE_MODE6, false},
	{mmGB_TILE_MODE7, false},
	{mmGB_TILE_MODE8, false},
	{mmGB_TILE_MODE9, false},
	{mmGB_TILE_MODE10, false},
	{mmGB_TILE_MODE11, false},
	{mmGB_TILE_MODE12, false},
	{mmGB_TILE_MODE13, false},
	{mmGB_TILE_MODE14, false},
	{mmGB_TILE_MODE15, false},
	{mmGB_TILE_MODE16, false},
	{mmGB_TILE_MODE17, false},
	{mmGB_TILE_MODE18, false},
	{mmGB_TILE_MODE19, false},
	{mmGB_TILE_MODE20, false},
	{mmGB_TILE_MODE21, false},
	{mmGB_TILE_MODE22, false},
	{mmGB_TILE_MODE23, false},
	{mmGB_TILE_MODE24, false},
	{mmGB_TILE_MODE25, false},
	{mmGB_TILE_MODE26, false},
	{mmGB_TILE_MODE27, false},
	{mmGB_TILE_MODE28, false},
	{mmGB_TILE_MODE29, false},
	{mmGB_TILE_MODE30, false},
	{mmGB_TILE_MODE31, false},
	{mmGB_MACROTILE_MODE0, false},
	{mmGB_MACROTILE_MODE1, false},
	{mmGB_MACROTILE_MODE2, false},
	{mmGB_MACROTILE_MODE3, false},
	{mmGB_MACROTILE_MODE4, false},
	{mmGB_MACROTILE_MODE5, false},
	{mmGB_MACROTILE_MODE6, false},
	{mmGB_MACROTILE_MODE7, false},
	{mmGB_MACROTILE_MODE8, false},
	{mmGB_MACROTILE_MODE9, false},
	{mmGB_MACROTILE_MODE10, false},
	{mmGB_MACROTILE_MODE11, false},
	{mmGB_MACROTILE_MODE12, false},
	{mmGB_MACROTILE_MODE13, false},
	{mmGB_MACROTILE_MODE14, false},
	{mmGB_MACROTILE_MODE15, false},
	{mmCC_RB_BACKEND_DISABLE, false, true},
	{mmGC_USER_RB_BACKEND_DISABLE, false, true},
	{mmGB_BACKEND_MAP, false, false},
	{mmPA_SC_RASTER_CONFIG, false, true},
	{mmPA_SC_RASTER_CONFIG_1, false, true},
};

static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		gfx_v8_0_select_se_sh(adev, se_num, sh_num);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

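/*
 * Entry point for userland register reads (e.g. the AMDGPU_INFO ioctl
 * path): only registers on the whitelists above may be read.
 */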
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	struct amdgpu_allowed_register_entry *asic_register_table = NULL;
	struct amdgpu_allowed_register_entry *asic_register_entry;
	uint32_t size, i;

	*value = 0;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		asic_register_table = tonga_allowed_read_registers;
		size = ARRAY_SIZE(tonga_allowed_read_registers);
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		asic_register_table = cz_allowed_read_registers;
		size = ARRAY_SIZE(cz_allowed_read_registers);
		break;
	default:
		return -EINVAL;
	}

	if (asic_register_table) {
		for (i = 0; i < size; i++) {
			asic_register_entry = asic_register_table + i;
			if (reg_offset != asic_register_entry->reg_offset)
				continue;
			if (!asic_register_entry->untouched)
				*value = asic_register_entry->grbm_indexed ?
					vi_read_indexed_register(adev, se_num,
								 sh_num, reg_offset) :
					RREG32(reg_offset);
			return 0;
		}
	}

	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		if (!vi_allowed_read_registers[i].untouched)
			*value = vi_allowed_read_registers[i].grbm_indexed ?
				vi_read_indexed_register(adev, se_num,
							 sh_num, reg_offset) :
				RREG32(reg_offset);
		return 0;
	}
	return -EINVAL;
}

static void vi_print_gpu_status_regs(struct amdgpu_device *adev)
{
	dev_info(adev->dev, "  GRBM_STATUS=0x%08X\n",
		RREG32(mmGRBM_STATUS));
	dev_info(adev->dev, "  GRBM_STATUS2=0x%08X\n",
		RREG32(mmGRBM_STATUS2));
	dev_info(adev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		RREG32(mmGRBM_STATUS_SE0));
	dev_info(adev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		RREG32(mmGRBM_STATUS_SE1));
	dev_info(adev->dev, "  GRBM_STATUS_SE2=0x%08X\n",
		RREG32(mmGRBM_STATUS_SE2));
	dev_info(adev->dev, "  GRBM_STATUS_SE3=0x%08X\n",
		RREG32(mmGRBM_STATUS_SE3));
	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(mmSRBM_STATUS));
	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
		RREG32(mmSRBM_STATUS2));
	dev_info(adev->dev, "  SDMA0_STATUS_REG   = 0x%08X\n",
		RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
	if (adev->sdma.num_instances > 1) {
		dev_info(adev->dev, "  SDMA1_STATUS_REG   = 0x%08X\n",
			RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
	}
	dev_info(adev->dev, "  CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
	dev_info(adev->dev, "  CP_STALLED_STAT1 = 0x%08x\n",
		 RREG32(mmCP_STALLED_STAT1));
	dev_info(adev->dev, "  CP_STALLED_STAT2 = 0x%08x\n",
		 RREG32(mmCP_STALLED_STAT2));
	dev_info(adev->dev, "  CP_STALLED_STAT3 = 0x%08x\n",
		 RREG32(mmCP_STALLED_STAT3));
	dev_info(adev->dev, "  CP_CPF_BUSY_STAT = 0x%08x\n",
		 RREG32(mmCP_CPF_BUSY_STAT));
	dev_info(adev->dev, "  CP_CPF_STALLED_STAT1 = 0x%08x\n",
		 RREG32(mmCP_CPF_STALLED_STAT1));
	dev_info(adev->dev, "  CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
	dev_info(adev->dev, "  CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
	dev_info(adev->dev, "  CP_CPC_STALLED_STAT1 = 0x%08x\n",
		 RREG32(mmCP_CPC_STALLED_STAT1));
	dev_info(adev->dev, "  CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
}

/**
 * vi_gpu_check_soft_reset - check which blocks are busy
 *
 * @adev: amdgpu_device pointer
 *
 * Check which blocks are busy and return the relevant reset
 * mask to be used by vi_gpu_soft_reset().
 * Returns a mask of the blocks to be reset.
 */
static u32 vi_gpu_check_soft_reset(struct amdgpu_device *adev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(mmGRBM_STATUS);
	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
		reset_mask |= AMDGPU_RESET_GFX;

	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK))
		reset_mask |= AMDGPU_RESET_CP;

	/* GRBM_STATUS2 */
	tmp = RREG32(mmGRBM_STATUS2);
	if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_RLC;

	if (tmp & (GRBM_STATUS2__CPF_BUSY_MASK |
		   GRBM_STATUS2__CPC_BUSY_MASK |
		   GRBM_STATUS2__CPG_BUSY_MASK))
		reset_mask |= AMDGPU_RESET_CP;

	/* SRBM_STATUS2 */
	tmp = RREG32(mmSRBM_STATUS2);
	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_DMA;

	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_DMA1;

	/* SRBM_STATUS */
	tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_IH;

	if (tmp & SRBM_STATUS__SEM_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_SEM;

	if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
		reset_mask |= AMDGPU_RESET_GRBM;

	if (adev->asic_type != CHIP_TOPAZ) {
		if (tmp & (SRBM_STATUS__UVD_RQ_PENDING_MASK |
			   SRBM_STATUS__UVD_BUSY_MASK))
			reset_mask |= AMDGPU_RESET_UVD;
	}

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_VMC;

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK))
		reset_mask |= AMDGPU_RESET_MC;

	/* SDMA0_STATUS_REG */
	tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
	if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
		reset_mask |= AMDGPU_RESET_DMA;

	/* SDMA1_STATUS_REG */
	if (adev->sdma.num_instances > 1) {
		tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
		if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
			reset_mask |= AMDGPU_RESET_DMA1;
	}
#if 0
	/* VCE_STATUS */
	if (adev->asic_type != CHIP_TOPAZ) {
		tmp = RREG32(mmVCE_STATUS);
		if (tmp & VCE_STATUS__VCPU_REPORT_RB0_BUSY_MASK)
			reset_mask |= AMDGPU_RESET_VCE;
		if (tmp & VCE_STATUS__VCPU_REPORT_RB1_BUSY_MASK)
			reset_mask |= AMDGPU_RESET_VCE1;

	}

	if (adev->asic_type != CHIP_TOPAZ) {
		if (amdgpu_display_is_display_hung(adev))
			reset_mask |= AMDGPU_RESET_DISPLAY;
	}
#endif

	/* Skip MC reset as it's most likely not hung, just busy */
	if (reset_mask & AMDGPU_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~AMDGPU_RESET_MC;
	}

	return reset_mask;
}

/**
 * vi_gpu_soft_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 * @reset_mask: mask of which blocks to reset
 *
 * Soft reset the blocks specified in @reset_mask.
 */
static void vi_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
{
	struct amdgpu_mode_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	vi_print_gpu_status_regs(adev);
	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));

	/* disable CG/PG */

	/* stop the rlc */
	//XXX
	//gfx_v8_0_rlc_stop(adev);

	/* Disable GFX parsing/prefetching */
	tmp = RREG32(mmCP_ME_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
	WREG32(mmCP_ME_CNTL, tmp);

	/* Disable MEC parsing/prefetching */
	tmp = RREG32(mmCP_MEC_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
	WREG32(mmCP_MEC_CNTL, tmp);

	if (reset_mask & AMDGPU_RESET_DMA) {
		/* sdma0 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
	}
	if (reset_mask & AMDGPU_RESET_DMA1) {
		/* sdma1 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
	}

	gmc_v8_0_mc_stop(adev, &save);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}

	if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP)) {
		grbm_soft_reset =
			REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
		grbm_soft_reset =
			REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
	}

	if (reset_mask & AMDGPU_RESET_CP) {
		grbm_soft_reset =
			REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
	}

	if (reset_mask & AMDGPU_RESET_DMA)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA, 1);

	if (reset_mask & AMDGPU_RESET_DMA1)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1, 1);

	if (reset_mask & AMDGPU_RESET_DISPLAY)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_DC, 1);

	if (reset_mask & AMDGPU_RESET_RLC)
		grbm_soft_reset =
			REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);

	if (reset_mask & AMDGPU_RESET_SEM)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);

	if (reset_mask & AMDGPU_RESET_IH)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_IH, 1);

	if (reset_mask & AMDGPU_RESET_GRBM)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);

	if (reset_mask & AMDGPU_RESET_VMC)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (reset_mask & AMDGPU_RESET_UVD)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (reset_mask & AMDGPU_RESET_VCE)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);

	if (reset_mask & AMDGPU_RESET_VCE1)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);

	if (!(adev->flags & AMD_IS_APU)) {
		if (reset_mask & AMDGPU_RESET_MC)
			srbm_soft_reset =
				REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (grbm_soft_reset) {
		tmp = RREG32(mmGRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmGRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmGRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	udelay(50);

	gmc_v8_0_mc_resume(adev, &save);
	udelay(50);

	vi_print_gpu_status_regs(adev);
}

static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp, i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable dpm? */

	/* disable cg/pg */

	/* Disable GFX parsing/prefetching */
	tmp = RREG32(mmCP_ME_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
	WREG32(mmCP_ME_CNTL, tmp);

	/* Disable MEC parsing/prefetching */
	tmp = RREG32(mmCP_MEC_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
	WREG32(mmCP_MEC_CNTL, tmp);

	/* sdma0 */
	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
	tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
	WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);

	/* sdma1 */
	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
	tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
	WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);

	/* XXX other engines? */

	/* halt the rlc, disable cp internal ints */
	//XXX
	//gfx_v8_0_rlc_stop(adev);

	udelay(50);

	/* disable mem access */
	gmc_v8_0_mc_stop(adev, &save);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
			break;
		udelay(1);
	}

}

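/*
 * Latch the engine-hung state into BIOS scratch register 3
 * (ATOM_S3_ASIC_GUI_ENGINE_HUNG) so the AtomBIOS can see that a reset
 * is in progress.
 */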
static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
{
	u32 tmp = RREG32(mmBIOS_SCRATCH_3);

	if (hung)
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
	else
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

	WREG32(mmBIOS_SCRATCH_3, tmp);
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	u32 reset_mask;

	reset_mask = vi_gpu_check_soft_reset(adev);

	if (reset_mask)
		vi_set_bios_scratch_engine_hung(adev, true);

	/* try soft reset */
	vi_gpu_soft_reset(adev, reset_mask);

	reset_mask = vi_gpu_check_soft_reset(adev);

	/* try pci config reset */
	if (reset_mask && amdgpu_hard_reset)
		vi_gpu_pci_config_reset(adev);

	reset_mask = vi_gpu_check_soft_reset(adev);

	if (!reset_mask)
		vi_set_bios_scratch_engine_hung(adev, false);

	return 0;
}

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);
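	/* Wait for the clock status bit to assert: 100 tries x 10ms = ~1s. */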
	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);

	return r;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
#ifndef __NetBSD__		/* XXX amdgpu pcie */
	u32 mask;
	int ret;

	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
	if (ret != 0)
		return;

	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
		return;

	/* todo */
#endif
}

static void vi_program_aspm(struct amdgpu_device *adev)
{

	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

/* topaz has no DCE, UVD, VCE */
static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 4,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 4,
		.rev = 0,
		.funcs = &iceland_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &iceland_dpm_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 4,
		.rev = 0,
		.funcs = &sdma_v2_4_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &tonga_dpm_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 10,
		.minor = 0,
		.rev = 0,
		.funcs = &dce_v10_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 5,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v5_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 5,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &tonga_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &fiji_dpm_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 10,
		.minor = 1,
		.rev = 0,
		.funcs = &dce_v10_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

static const struct amdgpu_ip_block_version cz_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vi_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &cz_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &cz_dpm_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 11,
		.minor = 0,
		.rev = 0,
		.funcs = &dce_v11_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 8,
		.minor = 0,
		.rev = 0,
		.funcs = &gfx_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &sdma_v3_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 3,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v3_0_ip_funcs,
	},
};

int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->ip_blocks = topaz_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
		break;
	case CHIP_FIJI:
		adev->ip_blocks = fiji_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
		break;
	case CHIP_TONGA:
		adev->ip_blocks = tonga_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		adev->ip_blocks = cz_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT        9
#define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_TOPAZ)
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
	else if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
			>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_cu_info = &gfx_v8_0_get_cu_info,
	/* these should be moved to their own ip modules */
	.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
	.wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
};

static int vi_common_early_init(void *handle)
{
	bool smc_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
		(amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
		smc_enabled = true;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->has_uvd = false;
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->has_uvd = true;
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->has_uvd = true;
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		adev->has_uvd = true;
		adev->cg_flags = 0;
		/* Disable UVD pg */
		adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_smc_load_fw && smc_enabled)
		adev->firmware.smu_load = true;

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static void vi_common_print_status(void *handle)
{
	return;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}

static int vi_common_set_clockgating_state(void *handle,
					    enum amd_clockgating_state state)
{
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs vi_common_ip_funcs = {
	.early_init = vi_common_early_init,
	.late_init = NULL,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.print_status = vi_common_print_status,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
};