xref: /dflybsd-src/sys/dev/drm/amd/amdgpu/amdgpu_device.c (revision b843c749addef9340ee7d4e250b09fdd492602a1)
1*b843c749SSergey Zigachev /*
2*b843c749SSergey Zigachev  * Copyright 2008 Advanced Micro Devices, Inc.
3*b843c749SSergey Zigachev  * Copyright 2008 Red Hat Inc.
4*b843c749SSergey Zigachev  * Copyright 2009 Jerome Glisse.
5*b843c749SSergey Zigachev  *
6*b843c749SSergey Zigachev  * Permission is hereby granted, free of charge, to any person obtaining a
7*b843c749SSergey Zigachev  * copy of this software and associated documentation files (the "Software"),
8*b843c749SSergey Zigachev  * to deal in the Software without restriction, including without limitation
9*b843c749SSergey Zigachev  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10*b843c749SSergey Zigachev  * and/or sell copies of the Software, and to permit persons to whom the
11*b843c749SSergey Zigachev  * Software is furnished to do so, subject to the following conditions:
12*b843c749SSergey Zigachev  *
13*b843c749SSergey Zigachev  * The above copyright notice and this permission notice shall be included in
14*b843c749SSergey Zigachev  * all copies or substantial portions of the Software.
15*b843c749SSergey Zigachev  *
16*b843c749SSergey Zigachev  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17*b843c749SSergey Zigachev  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18*b843c749SSergey Zigachev  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19*b843c749SSergey Zigachev  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20*b843c749SSergey Zigachev  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21*b843c749SSergey Zigachev  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22*b843c749SSergey Zigachev  * OTHER DEALINGS IN THE SOFTWARE.
23*b843c749SSergey Zigachev  *
24*b843c749SSergey Zigachev  * Authors: Dave Airlie
25*b843c749SSergey Zigachev  *          Alex Deucher
26*b843c749SSergey Zigachev  *          Jerome Glisse
27*b843c749SSergey Zigachev  */
28*b843c749SSergey Zigachev #include <linux/power_supply.h>
29*b843c749SSergey Zigachev #include <linux/kthread.h>
30*b843c749SSergey Zigachev #include <linux/console.h>
31*b843c749SSergey Zigachev #include <linux/slab.h>
32*b843c749SSergey Zigachev #include <drm/drmP.h>
33*b843c749SSergey Zigachev #include <drm/drm_crtc_helper.h>
34*b843c749SSergey Zigachev #include <drm/drm_atomic_helper.h>
35*b843c749SSergey Zigachev #include <drm/amdgpu_drm.h>
36*b843c749SSergey Zigachev #include <linux/vgaarb.h>
37*b843c749SSergey Zigachev #include <linux/vga_switcheroo.h>
38*b843c749SSergey Zigachev #include <linux/efi.h>
39*b843c749SSergey Zigachev #include "amdgpu.h"
40*b843c749SSergey Zigachev #include "amdgpu_trace.h"
41*b843c749SSergey Zigachev #include "amdgpu_i2c.h"
42*b843c749SSergey Zigachev #include "atom.h"
43*b843c749SSergey Zigachev #include "amdgpu_atombios.h"
44*b843c749SSergey Zigachev #include "amdgpu_atomfirmware.h"
45*b843c749SSergey Zigachev #include "amd_pcie.h"
46*b843c749SSergey Zigachev #ifdef CONFIG_DRM_AMDGPU_SI
47*b843c749SSergey Zigachev #include "si.h"
48*b843c749SSergey Zigachev #endif
49*b843c749SSergey Zigachev #ifdef CONFIG_DRM_AMDGPU_CIK
50*b843c749SSergey Zigachev #include "cik.h"
51*b843c749SSergey Zigachev #endif
52*b843c749SSergey Zigachev #include "vi.h"
53*b843c749SSergey Zigachev #include "soc15.h"
54*b843c749SSergey Zigachev #include "bif/bif_4_1_d.h"
55*b843c749SSergey Zigachev #include <linux/pci.h>
56*b843c749SSergey Zigachev #include <linux/firmware.h>
57*b843c749SSergey Zigachev #include "amdgpu_vf_error.h"
58*b843c749SSergey Zigachev 
59*b843c749SSergey Zigachev #include "amdgpu_amdkfd.h"
60*b843c749SSergey Zigachev #include "amdgpu_pm.h"
61*b843c749SSergey Zigachev 
62*b843c749SSergey Zigachev MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
63*b843c749SSergey Zigachev MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
64*b843c749SSergey Zigachev MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
65*b843c749SSergey Zigachev 
66*b843c749SSergey Zigachev #define AMDGPU_RESUME_MS		2000
67*b843c749SSergey Zigachev 
68*b843c749SSergey Zigachev static const char *amdgpu_asic_name[] = {
69*b843c749SSergey Zigachev 	"TAHITI",
70*b843c749SSergey Zigachev 	"PITCAIRN",
71*b843c749SSergey Zigachev 	"VERDE",
72*b843c749SSergey Zigachev 	"OLAND",
73*b843c749SSergey Zigachev 	"HAINAN",
74*b843c749SSergey Zigachev 	"BONAIRE",
75*b843c749SSergey Zigachev 	"KAVERI",
76*b843c749SSergey Zigachev 	"KABINI",
77*b843c749SSergey Zigachev 	"HAWAII",
78*b843c749SSergey Zigachev 	"MULLINS",
79*b843c749SSergey Zigachev 	"TOPAZ",
80*b843c749SSergey Zigachev 	"TONGA",
81*b843c749SSergey Zigachev 	"FIJI",
82*b843c749SSergey Zigachev 	"CARRIZO",
83*b843c749SSergey Zigachev 	"STONEY",
84*b843c749SSergey Zigachev 	"POLARIS10",
85*b843c749SSergey Zigachev 	"POLARIS11",
86*b843c749SSergey Zigachev 	"POLARIS12",
87*b843c749SSergey Zigachev 	"VEGAM",
88*b843c749SSergey Zigachev 	"VEGA10",
89*b843c749SSergey Zigachev 	"VEGA12",
90*b843c749SSergey Zigachev 	"VEGA20",
91*b843c749SSergey Zigachev 	"RAVEN",
92*b843c749SSergey Zigachev 	"LAST",
93*b843c749SSergey Zigachev };
94*b843c749SSergey Zigachev 
95*b843c749SSergey Zigachev static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
96*b843c749SSergey Zigachev 
97*b843c749SSergey Zigachev /**
98*b843c749SSergey Zigachev  * amdgpu_device_is_px - Is the device is a dGPU with HG/PX power control
99*b843c749SSergey Zigachev  *
100*b843c749SSergey Zigachev  * @dev: drm_device pointer
101*b843c749SSergey Zigachev  *
102*b843c749SSergey Zigachev  * Returns true if the device is a dGPU with HG/PX power control,
103*b843c749SSergey Zigachev  * otherwise return false.
104*b843c749SSergey Zigachev  */
105*b843c749SSergey Zigachev bool amdgpu_device_is_px(struct drm_device *dev)
106*b843c749SSergey Zigachev {
107*b843c749SSergey Zigachev 	struct amdgpu_device *adev = dev->dev_private;
108*b843c749SSergey Zigachev 
109*b843c749SSergey Zigachev 	if (adev->flags & AMD_IS_PX)
110*b843c749SSergey Zigachev 		return true;
111*b843c749SSergey Zigachev 	return false;
112*b843c749SSergey Zigachev }
113*b843c749SSergey Zigachev 
114*b843c749SSergey Zigachev /*
115*b843c749SSergey Zigachev  * MMIO register access helper functions.
116*b843c749SSergey Zigachev  */
117*b843c749SSergey Zigachev /**
118*b843c749SSergey Zigachev  * amdgpu_mm_rreg - read a memory mapped IO register
119*b843c749SSergey Zigachev  *
120*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
121*b843c749SSergey Zigachev  * @reg: dword aligned register offset
122*b843c749SSergey Zigachev  * @acc_flags: access flags which require special behavior
123*b843c749SSergey Zigachev  *
124*b843c749SSergey Zigachev  * Returns the 32 bit value from the offset specified.
125*b843c749SSergey Zigachev  */
126*b843c749SSergey Zigachev uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
127*b843c749SSergey Zigachev 			uint32_t acc_flags)
128*b843c749SSergey Zigachev {
129*b843c749SSergey Zigachev 	uint32_t ret;
130*b843c749SSergey Zigachev 
131*b843c749SSergey Zigachev 	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
132*b843c749SSergey Zigachev 		return amdgpu_virt_kiq_rreg(adev, reg);
133*b843c749SSergey Zigachev 
134*b843c749SSergey Zigachev 	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
135*b843c749SSergey Zigachev 		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
136*b843c749SSergey Zigachev 	else {
137*b843c749SSergey Zigachev 		unsigned long flags;
138*b843c749SSergey Zigachev 
139*b843c749SSergey Zigachev 		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
140*b843c749SSergey Zigachev 		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
141*b843c749SSergey Zigachev 		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
142*b843c749SSergey Zigachev 		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
143*b843c749SSergey Zigachev 	}
144*b843c749SSergey Zigachev 	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
145*b843c749SSergey Zigachev 	return ret;
146*b843c749SSergey Zigachev }
147*b843c749SSergey Zigachev 
148*b843c749SSergey Zigachev /*
149*b843c749SSergey Zigachev  * MMIO register read with bytes helper functions
150*b843c749SSergey Zigachev  * @offset:bytes offset from MMIO start
151*b843c749SSergey Zigachev  *
152*b843c749SSergey Zigachev */
153*b843c749SSergey Zigachev 
154*b843c749SSergey Zigachev /**
155*b843c749SSergey Zigachev  * amdgpu_mm_rreg8 - read a memory mapped IO register
156*b843c749SSergey Zigachev  *
157*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
158*b843c749SSergey Zigachev  * @offset: byte aligned register offset
159*b843c749SSergey Zigachev  *
160*b843c749SSergey Zigachev  * Returns the 8 bit value from the offset specified.
161*b843c749SSergey Zigachev  */
162*b843c749SSergey Zigachev uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
163*b843c749SSergey Zigachev 	if (offset < adev->rmmio_size)
164*b843c749SSergey Zigachev 		return (readb(adev->rmmio + offset));
165*b843c749SSergey Zigachev 	BUG();
166*b843c749SSergey Zigachev }
167*b843c749SSergey Zigachev 
168*b843c749SSergey Zigachev /*
169*b843c749SSergey Zigachev  * MMIO register write with bytes helper functions
170*b843c749SSergey Zigachev  * @offset:bytes offset from MMIO start
171*b843c749SSergey Zigachev  * @value: the value want to be written to the register
172*b843c749SSergey Zigachev  *
173*b843c749SSergey Zigachev */
174*b843c749SSergey Zigachev /**
175*b843c749SSergey Zigachev  * amdgpu_mm_wreg8 - read a memory mapped IO register
176*b843c749SSergey Zigachev  *
177*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
178*b843c749SSergey Zigachev  * @offset: byte aligned register offset
179*b843c749SSergey Zigachev  * @value: 8 bit value to write
180*b843c749SSergey Zigachev  *
181*b843c749SSergey Zigachev  * Writes the value specified to the offset specified.
182*b843c749SSergey Zigachev  */
183*b843c749SSergey Zigachev void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) {
184*b843c749SSergey Zigachev 	if (offset < adev->rmmio_size)
185*b843c749SSergey Zigachev 		writeb(value, adev->rmmio + offset);
186*b843c749SSergey Zigachev 	else
187*b843c749SSergey Zigachev 		BUG();
188*b843c749SSergey Zigachev }
189*b843c749SSergey Zigachev 
190*b843c749SSergey Zigachev /**
191*b843c749SSergey Zigachev  * amdgpu_mm_wreg - write to a memory mapped IO register
192*b843c749SSergey Zigachev  *
193*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
194*b843c749SSergey Zigachev  * @reg: dword aligned register offset
195*b843c749SSergey Zigachev  * @v: 32 bit value to write to the register
196*b843c749SSergey Zigachev  * @acc_flags: access flags which require special behavior
197*b843c749SSergey Zigachev  *
198*b843c749SSergey Zigachev  * Writes the value specified to the offset specified.
199*b843c749SSergey Zigachev  */
200*b843c749SSergey Zigachev void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
201*b843c749SSergey Zigachev 		    uint32_t acc_flags)
202*b843c749SSergey Zigachev {
203*b843c749SSergey Zigachev 	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
204*b843c749SSergey Zigachev 
205*b843c749SSergey Zigachev 	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
206*b843c749SSergey Zigachev 		adev->last_mm_index = v;
207*b843c749SSergey Zigachev 	}
208*b843c749SSergey Zigachev 
209*b843c749SSergey Zigachev 	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
210*b843c749SSergey Zigachev 		return amdgpu_virt_kiq_wreg(adev, reg, v);
211*b843c749SSergey Zigachev 
212*b843c749SSergey Zigachev 	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
213*b843c749SSergey Zigachev 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
214*b843c749SSergey Zigachev 	else {
215*b843c749SSergey Zigachev 		unsigned long flags;
216*b843c749SSergey Zigachev 
217*b843c749SSergey Zigachev 		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
218*b843c749SSergey Zigachev 		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
219*b843c749SSergey Zigachev 		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
220*b843c749SSergey Zigachev 		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
221*b843c749SSergey Zigachev 	}
222*b843c749SSergey Zigachev 
223*b843c749SSergey Zigachev 	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
224*b843c749SSergey Zigachev 		udelay(500);
225*b843c749SSergey Zigachev 	}
226*b843c749SSergey Zigachev }
227*b843c749SSergey Zigachev 
228*b843c749SSergey Zigachev /**
229*b843c749SSergey Zigachev  * amdgpu_io_rreg - read an IO register
230*b843c749SSergey Zigachev  *
231*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
232*b843c749SSergey Zigachev  * @reg: dword aligned register offset
233*b843c749SSergey Zigachev  *
234*b843c749SSergey Zigachev  * Returns the 32 bit value from the offset specified.
235*b843c749SSergey Zigachev  */
236*b843c749SSergey Zigachev u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
237*b843c749SSergey Zigachev {
238*b843c749SSergey Zigachev 	if ((reg * 4) < adev->rio_mem_size)
239*b843c749SSergey Zigachev 		return ioread32(adev->rio_mem + (reg * 4));
240*b843c749SSergey Zigachev 	else {
241*b843c749SSergey Zigachev 		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
242*b843c749SSergey Zigachev 		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
243*b843c749SSergey Zigachev 	}
244*b843c749SSergey Zigachev }
245*b843c749SSergey Zigachev 
246*b843c749SSergey Zigachev /**
247*b843c749SSergey Zigachev  * amdgpu_io_wreg - write to an IO register
248*b843c749SSergey Zigachev  *
249*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
250*b843c749SSergey Zigachev  * @reg: dword aligned register offset
251*b843c749SSergey Zigachev  * @v: 32 bit value to write to the register
252*b843c749SSergey Zigachev  *
253*b843c749SSergey Zigachev  * Writes the value specified to the offset specified.
254*b843c749SSergey Zigachev  */
255*b843c749SSergey Zigachev void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
256*b843c749SSergey Zigachev {
257*b843c749SSergey Zigachev 	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
258*b843c749SSergey Zigachev 		adev->last_mm_index = v;
259*b843c749SSergey Zigachev 	}
260*b843c749SSergey Zigachev 
261*b843c749SSergey Zigachev 	if ((reg * 4) < adev->rio_mem_size)
262*b843c749SSergey Zigachev 		iowrite32(v, adev->rio_mem + (reg * 4));
263*b843c749SSergey Zigachev 	else {
264*b843c749SSergey Zigachev 		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
265*b843c749SSergey Zigachev 		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
266*b843c749SSergey Zigachev 	}
267*b843c749SSergey Zigachev 
268*b843c749SSergey Zigachev 	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
269*b843c749SSergey Zigachev 		udelay(500);
270*b843c749SSergey Zigachev 	}
271*b843c749SSergey Zigachev }
272*b843c749SSergey Zigachev 
273*b843c749SSergey Zigachev /**
274*b843c749SSergey Zigachev  * amdgpu_mm_rdoorbell - read a doorbell dword
275*b843c749SSergey Zigachev  *
276*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
277*b843c749SSergey Zigachev  * @index: doorbell index
278*b843c749SSergey Zigachev  *
279*b843c749SSergey Zigachev  * Returns the value in the doorbell aperture at the
280*b843c749SSergey Zigachev  * requested doorbell index (CIK).
281*b843c749SSergey Zigachev  */
282*b843c749SSergey Zigachev u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
283*b843c749SSergey Zigachev {
284*b843c749SSergey Zigachev 	if (index < adev->doorbell.num_doorbells) {
285*b843c749SSergey Zigachev 		return readl(adev->doorbell.ptr + index);
286*b843c749SSergey Zigachev 	} else {
287*b843c749SSergey Zigachev 		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
288*b843c749SSergey Zigachev 		return 0;
289*b843c749SSergey Zigachev 	}
290*b843c749SSergey Zigachev }
291*b843c749SSergey Zigachev 
292*b843c749SSergey Zigachev /**
293*b843c749SSergey Zigachev  * amdgpu_mm_wdoorbell - write a doorbell dword
294*b843c749SSergey Zigachev  *
295*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
296*b843c749SSergey Zigachev  * @index: doorbell index
297*b843c749SSergey Zigachev  * @v: value to write
298*b843c749SSergey Zigachev  *
299*b843c749SSergey Zigachev  * Writes @v to the doorbell aperture at the
300*b843c749SSergey Zigachev  * requested doorbell index (CIK).
301*b843c749SSergey Zigachev  */
302*b843c749SSergey Zigachev void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
303*b843c749SSergey Zigachev {
304*b843c749SSergey Zigachev 	if (index < adev->doorbell.num_doorbells) {
305*b843c749SSergey Zigachev 		writel(v, adev->doorbell.ptr + index);
306*b843c749SSergey Zigachev 	} else {
307*b843c749SSergey Zigachev 		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
308*b843c749SSergey Zigachev 	}
309*b843c749SSergey Zigachev }
310*b843c749SSergey Zigachev 
311*b843c749SSergey Zigachev /**
312*b843c749SSergey Zigachev  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
313*b843c749SSergey Zigachev  *
314*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
315*b843c749SSergey Zigachev  * @index: doorbell index
316*b843c749SSergey Zigachev  *
317*b843c749SSergey Zigachev  * Returns the value in the doorbell aperture at the
318*b843c749SSergey Zigachev  * requested doorbell index (VEGA10+).
319*b843c749SSergey Zigachev  */
320*b843c749SSergey Zigachev u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
321*b843c749SSergey Zigachev {
322*b843c749SSergey Zigachev 	if (index < adev->doorbell.num_doorbells) {
323*b843c749SSergey Zigachev 		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
324*b843c749SSergey Zigachev 	} else {
325*b843c749SSergey Zigachev 		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
326*b843c749SSergey Zigachev 		return 0;
327*b843c749SSergey Zigachev 	}
328*b843c749SSergey Zigachev }
329*b843c749SSergey Zigachev 
330*b843c749SSergey Zigachev /**
331*b843c749SSergey Zigachev  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
332*b843c749SSergey Zigachev  *
333*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
334*b843c749SSergey Zigachev  * @index: doorbell index
335*b843c749SSergey Zigachev  * @v: value to write
336*b843c749SSergey Zigachev  *
337*b843c749SSergey Zigachev  * Writes @v to the doorbell aperture at the
338*b843c749SSergey Zigachev  * requested doorbell index (VEGA10+).
339*b843c749SSergey Zigachev  */
340*b843c749SSergey Zigachev void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
341*b843c749SSergey Zigachev {
342*b843c749SSergey Zigachev 	if (index < adev->doorbell.num_doorbells) {
343*b843c749SSergey Zigachev 		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
344*b843c749SSergey Zigachev 	} else {
345*b843c749SSergey Zigachev 		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
346*b843c749SSergey Zigachev 	}
347*b843c749SSergey Zigachev }
348*b843c749SSergey Zigachev 
349*b843c749SSergey Zigachev /**
350*b843c749SSergey Zigachev  * amdgpu_invalid_rreg - dummy reg read function
351*b843c749SSergey Zigachev  *
352*b843c749SSergey Zigachev  * @adev: amdgpu device pointer
353*b843c749SSergey Zigachev  * @reg: offset of register
354*b843c749SSergey Zigachev  *
355*b843c749SSergey Zigachev  * Dummy register read function.  Used for register blocks
356*b843c749SSergey Zigachev  * that certain asics don't have (all asics).
357*b843c749SSergey Zigachev  * Returns the value in the register.
358*b843c749SSergey Zigachev  */
359*b843c749SSergey Zigachev static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
360*b843c749SSergey Zigachev {
361*b843c749SSergey Zigachev 	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
362*b843c749SSergey Zigachev 	BUG();
363*b843c749SSergey Zigachev 	return 0;
364*b843c749SSergey Zigachev }
365*b843c749SSergey Zigachev 
366*b843c749SSergey Zigachev /**
367*b843c749SSergey Zigachev  * amdgpu_invalid_wreg - dummy reg write function
368*b843c749SSergey Zigachev  *
369*b843c749SSergey Zigachev  * @adev: amdgpu device pointer
370*b843c749SSergey Zigachev  * @reg: offset of register
371*b843c749SSergey Zigachev  * @v: value to write to the register
372*b843c749SSergey Zigachev  *
373*b843c749SSergey Zigachev  * Dummy register read function.  Used for register blocks
374*b843c749SSergey Zigachev  * that certain asics don't have (all asics).
375*b843c749SSergey Zigachev  */
376*b843c749SSergey Zigachev static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
377*b843c749SSergey Zigachev {
378*b843c749SSergey Zigachev 	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
379*b843c749SSergey Zigachev 		  reg, v);
380*b843c749SSergey Zigachev 	BUG();
381*b843c749SSergey Zigachev }
382*b843c749SSergey Zigachev 
383*b843c749SSergey Zigachev /**
384*b843c749SSergey Zigachev  * amdgpu_block_invalid_rreg - dummy reg read function
385*b843c749SSergey Zigachev  *
386*b843c749SSergey Zigachev  * @adev: amdgpu device pointer
387*b843c749SSergey Zigachev  * @block: offset of instance
388*b843c749SSergey Zigachev  * @reg: offset of register
389*b843c749SSergey Zigachev  *
390*b843c749SSergey Zigachev  * Dummy register read function.  Used for register blocks
391*b843c749SSergey Zigachev  * that certain asics don't have (all asics).
392*b843c749SSergey Zigachev  * Returns the value in the register.
393*b843c749SSergey Zigachev  */
394*b843c749SSergey Zigachev static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
395*b843c749SSergey Zigachev 					  uint32_t block, uint32_t reg)
396*b843c749SSergey Zigachev {
397*b843c749SSergey Zigachev 	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
398*b843c749SSergey Zigachev 		  reg, block);
399*b843c749SSergey Zigachev 	BUG();
400*b843c749SSergey Zigachev 	return 0;
401*b843c749SSergey Zigachev }
402*b843c749SSergey Zigachev 
403*b843c749SSergey Zigachev /**
404*b843c749SSergey Zigachev  * amdgpu_block_invalid_wreg - dummy reg write function
405*b843c749SSergey Zigachev  *
406*b843c749SSergey Zigachev  * @adev: amdgpu device pointer
407*b843c749SSergey Zigachev  * @block: offset of instance
408*b843c749SSergey Zigachev  * @reg: offset of register
409*b843c749SSergey Zigachev  * @v: value to write to the register
410*b843c749SSergey Zigachev  *
411*b843c749SSergey Zigachev  * Dummy register read function.  Used for register blocks
412*b843c749SSergey Zigachev  * that certain asics don't have (all asics).
413*b843c749SSergey Zigachev  */
414*b843c749SSergey Zigachev static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
415*b843c749SSergey Zigachev 				      uint32_t block,
416*b843c749SSergey Zigachev 				      uint32_t reg, uint32_t v)
417*b843c749SSergey Zigachev {
418*b843c749SSergey Zigachev 	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
419*b843c749SSergey Zigachev 		  reg, block, v);
420*b843c749SSergey Zigachev 	BUG();
421*b843c749SSergey Zigachev }
422*b843c749SSergey Zigachev 
423*b843c749SSergey Zigachev /**
424*b843c749SSergey Zigachev  * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
425*b843c749SSergey Zigachev  *
426*b843c749SSergey Zigachev  * @adev: amdgpu device pointer
427*b843c749SSergey Zigachev  *
428*b843c749SSergey Zigachev  * Allocates a scratch page of VRAM for use by various things in the
429*b843c749SSergey Zigachev  * driver.
430*b843c749SSergey Zigachev  */
431*b843c749SSergey Zigachev static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
432*b843c749SSergey Zigachev {
433*b843c749SSergey Zigachev 	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
434*b843c749SSergey Zigachev 				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
435*b843c749SSergey Zigachev 				       &adev->vram_scratch.robj,
436*b843c749SSergey Zigachev 				       &adev->vram_scratch.gpu_addr,
437*b843c749SSergey Zigachev 				       (void **)&adev->vram_scratch.ptr);
438*b843c749SSergey Zigachev }
439*b843c749SSergey Zigachev 
440*b843c749SSergey Zigachev /**
441*b843c749SSergey Zigachev  * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
442*b843c749SSergey Zigachev  *
443*b843c749SSergey Zigachev  * @adev: amdgpu device pointer
444*b843c749SSergey Zigachev  *
445*b843c749SSergey Zigachev  * Frees the VRAM scratch page.
446*b843c749SSergey Zigachev  */
447*b843c749SSergey Zigachev static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
448*b843c749SSergey Zigachev {
449*b843c749SSergey Zigachev 	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
450*b843c749SSergey Zigachev }
451*b843c749SSergey Zigachev 
452*b843c749SSergey Zigachev /**
453*b843c749SSergey Zigachev  * amdgpu_device_program_register_sequence - program an array of registers.
454*b843c749SSergey Zigachev  *
455*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
456*b843c749SSergey Zigachev  * @registers: pointer to the register array
457*b843c749SSergey Zigachev  * @array_size: size of the register array
458*b843c749SSergey Zigachev  *
459*b843c749SSergey Zigachev  * Programs an array or registers with and and or masks.
460*b843c749SSergey Zigachev  * This is a helper for setting golden registers.
461*b843c749SSergey Zigachev  */
462*b843c749SSergey Zigachev void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
463*b843c749SSergey Zigachev 					     const u32 *registers,
464*b843c749SSergey Zigachev 					     const u32 array_size)
465*b843c749SSergey Zigachev {
466*b843c749SSergey Zigachev 	u32 tmp, reg, and_mask, or_mask;
467*b843c749SSergey Zigachev 	int i;
468*b843c749SSergey Zigachev 
469*b843c749SSergey Zigachev 	if (array_size % 3)
470*b843c749SSergey Zigachev 		return;
471*b843c749SSergey Zigachev 
472*b843c749SSergey Zigachev 	for (i = 0; i < array_size; i +=3) {
473*b843c749SSergey Zigachev 		reg = registers[i + 0];
474*b843c749SSergey Zigachev 		and_mask = registers[i + 1];
475*b843c749SSergey Zigachev 		or_mask = registers[i + 2];
476*b843c749SSergey Zigachev 
477*b843c749SSergey Zigachev 		if (and_mask == 0xffffffff) {
478*b843c749SSergey Zigachev 			tmp = or_mask;
479*b843c749SSergey Zigachev 		} else {
480*b843c749SSergey Zigachev 			tmp = RREG32(reg);
481*b843c749SSergey Zigachev 			tmp &= ~and_mask;
482*b843c749SSergey Zigachev 			tmp |= or_mask;
483*b843c749SSergey Zigachev 		}
484*b843c749SSergey Zigachev 		WREG32(reg, tmp);
485*b843c749SSergey Zigachev 	}
486*b843c749SSergey Zigachev }
487*b843c749SSergey Zigachev 
488*b843c749SSergey Zigachev /**
489*b843c749SSergey Zigachev  * amdgpu_device_pci_config_reset - reset the GPU
490*b843c749SSergey Zigachev  *
491*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
492*b843c749SSergey Zigachev  *
493*b843c749SSergey Zigachev  * Resets the GPU using the pci config reset sequence.
494*b843c749SSergey Zigachev  * Only applicable to asics prior to vega10.
495*b843c749SSergey Zigachev  */
496*b843c749SSergey Zigachev void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
497*b843c749SSergey Zigachev {
498*b843c749SSergey Zigachev 	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
499*b843c749SSergey Zigachev }
500*b843c749SSergey Zigachev 
501*b843c749SSergey Zigachev /*
502*b843c749SSergey Zigachev  * GPU doorbell aperture helpers function.
503*b843c749SSergey Zigachev  */
504*b843c749SSergey Zigachev /**
505*b843c749SSergey Zigachev  * amdgpu_device_doorbell_init - Init doorbell driver information.
506*b843c749SSergey Zigachev  *
507*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
508*b843c749SSergey Zigachev  *
509*b843c749SSergey Zigachev  * Init doorbell driver information (CIK)
510*b843c749SSergey Zigachev  * Returns 0 on success, error on failure.
511*b843c749SSergey Zigachev  */
512*b843c749SSergey Zigachev static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
513*b843c749SSergey Zigachev {
514*b843c749SSergey Zigachev 	/* No doorbell on SI hardware generation */
515*b843c749SSergey Zigachev 	if (adev->asic_type < CHIP_BONAIRE) {
516*b843c749SSergey Zigachev 		adev->doorbell.base = 0;
517*b843c749SSergey Zigachev 		adev->doorbell.size = 0;
518*b843c749SSergey Zigachev 		adev->doorbell.num_doorbells = 0;
519*b843c749SSergey Zigachev 		adev->doorbell.ptr = NULL;
520*b843c749SSergey Zigachev 		return 0;
521*b843c749SSergey Zigachev 	}
522*b843c749SSergey Zigachev 
523*b843c749SSergey Zigachev 	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
524*b843c749SSergey Zigachev 		return -EINVAL;
525*b843c749SSergey Zigachev 
526*b843c749SSergey Zigachev 	/* doorbell bar mapping */
527*b843c749SSergey Zigachev 	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
528*b843c749SSergey Zigachev 	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
529*b843c749SSergey Zigachev 
530*b843c749SSergey Zigachev 	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
531*b843c749SSergey Zigachev 					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
532*b843c749SSergey Zigachev 	if (adev->doorbell.num_doorbells == 0)
533*b843c749SSergey Zigachev 		return -EINVAL;
534*b843c749SSergey Zigachev 
535*b843c749SSergey Zigachev 	adev->doorbell.ptr = ioremap(adev->doorbell.base,
536*b843c749SSergey Zigachev 				     adev->doorbell.num_doorbells *
537*b843c749SSergey Zigachev 				     sizeof(u32));
538*b843c749SSergey Zigachev 	if (adev->doorbell.ptr == NULL)
539*b843c749SSergey Zigachev 		return -ENOMEM;
540*b843c749SSergey Zigachev 
541*b843c749SSergey Zigachev 	return 0;
542*b843c749SSergey Zigachev }
543*b843c749SSergey Zigachev 
544*b843c749SSergey Zigachev /**
545*b843c749SSergey Zigachev  * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
546*b843c749SSergey Zigachev  *
547*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
548*b843c749SSergey Zigachev  *
549*b843c749SSergey Zigachev  * Tear down doorbell driver information (CIK)
550*b843c749SSergey Zigachev  */
551*b843c749SSergey Zigachev static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
552*b843c749SSergey Zigachev {
553*b843c749SSergey Zigachev 	iounmap(adev->doorbell.ptr);
554*b843c749SSergey Zigachev 	adev->doorbell.ptr = NULL;
555*b843c749SSergey Zigachev }
556*b843c749SSergey Zigachev 
557*b843c749SSergey Zigachev 
558*b843c749SSergey Zigachev 
559*b843c749SSergey Zigachev /*
560*b843c749SSergey Zigachev  * amdgpu_device_wb_*()
561*b843c749SSergey Zigachev  * Writeback is the method by which the GPU updates special pages in memory
562*b843c749SSergey Zigachev  * with the status of certain GPU events (fences, ring pointers,etc.).
563*b843c749SSergey Zigachev  */
564*b843c749SSergey Zigachev 
565*b843c749SSergey Zigachev /**
566*b843c749SSergey Zigachev  * amdgpu_device_wb_fini - Disable Writeback and free memory
567*b843c749SSergey Zigachev  *
568*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
569*b843c749SSergey Zigachev  *
570*b843c749SSergey Zigachev  * Disables Writeback and frees the Writeback memory (all asics).
571*b843c749SSergey Zigachev  * Used at driver shutdown.
572*b843c749SSergey Zigachev  */
573*b843c749SSergey Zigachev static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
574*b843c749SSergey Zigachev {
575*b843c749SSergey Zigachev 	if (adev->wb.wb_obj) {
576*b843c749SSergey Zigachev 		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
577*b843c749SSergey Zigachev 				      &adev->wb.gpu_addr,
578*b843c749SSergey Zigachev 				      (void **)&adev->wb.wb);
579*b843c749SSergey Zigachev 		adev->wb.wb_obj = NULL;
580*b843c749SSergey Zigachev 	}
581*b843c749SSergey Zigachev }
582*b843c749SSergey Zigachev 
583*b843c749SSergey Zigachev /**
584*b843c749SSergey Zigachev  * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
585*b843c749SSergey Zigachev  *
586*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
587*b843c749SSergey Zigachev  *
588*b843c749SSergey Zigachev  * Initializes writeback and allocates writeback memory (all asics).
589*b843c749SSergey Zigachev  * Used at driver startup.
590*b843c749SSergey Zigachev  * Returns 0 on success or an -error on failure.
591*b843c749SSergey Zigachev  */
592*b843c749SSergey Zigachev static int amdgpu_device_wb_init(struct amdgpu_device *adev)
593*b843c749SSergey Zigachev {
594*b843c749SSergey Zigachev 	int r;
595*b843c749SSergey Zigachev 
596*b843c749SSergey Zigachev 	if (adev->wb.wb_obj == NULL) {
597*b843c749SSergey Zigachev 		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
598*b843c749SSergey Zigachev 		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
599*b843c749SSergey Zigachev 					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
600*b843c749SSergey Zigachev 					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
601*b843c749SSergey Zigachev 					    (void **)&adev->wb.wb);
602*b843c749SSergey Zigachev 		if (r) {
603*b843c749SSergey Zigachev 			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
604*b843c749SSergey Zigachev 			return r;
605*b843c749SSergey Zigachev 		}
606*b843c749SSergey Zigachev 
607*b843c749SSergey Zigachev 		adev->wb.num_wb = AMDGPU_MAX_WB;
608*b843c749SSergey Zigachev 		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
609*b843c749SSergey Zigachev 
610*b843c749SSergey Zigachev 		/* clear wb memory */
611*b843c749SSergey Zigachev 		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
612*b843c749SSergey Zigachev 	}
613*b843c749SSergey Zigachev 
614*b843c749SSergey Zigachev 	return 0;
615*b843c749SSergey Zigachev }
616*b843c749SSergey Zigachev 
617*b843c749SSergey Zigachev /**
618*b843c749SSergey Zigachev  * amdgpu_device_wb_get - Allocate a wb entry
619*b843c749SSergey Zigachev  *
620*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
621*b843c749SSergey Zigachev  * @wb: wb index
622*b843c749SSergey Zigachev  *
623*b843c749SSergey Zigachev  * Allocate a wb slot for use by the driver (all asics).
624*b843c749SSergey Zigachev  * Returns 0 on success or -EINVAL on failure.
625*b843c749SSergey Zigachev  */
626*b843c749SSergey Zigachev int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
627*b843c749SSergey Zigachev {
628*b843c749SSergey Zigachev 	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
629*b843c749SSergey Zigachev 
630*b843c749SSergey Zigachev 	if (offset < adev->wb.num_wb) {
631*b843c749SSergey Zigachev 		__set_bit(offset, adev->wb.used);
632*b843c749SSergey Zigachev 		*wb = offset << 3; /* convert to dw offset */
633*b843c749SSergey Zigachev 		return 0;
634*b843c749SSergey Zigachev 	} else {
635*b843c749SSergey Zigachev 		return -EINVAL;
636*b843c749SSergey Zigachev 	}
637*b843c749SSergey Zigachev }
638*b843c749SSergey Zigachev 
639*b843c749SSergey Zigachev /**
640*b843c749SSergey Zigachev  * amdgpu_device_wb_free - Free a wb entry
641*b843c749SSergey Zigachev  *
642*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
643*b843c749SSergey Zigachev  * @wb: wb index
644*b843c749SSergey Zigachev  *
645*b843c749SSergey Zigachev  * Free a wb slot allocated for use by the driver (all asics)
646*b843c749SSergey Zigachev  */
647*b843c749SSergey Zigachev void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
648*b843c749SSergey Zigachev {
649*b843c749SSergey Zigachev 	wb >>= 3;
650*b843c749SSergey Zigachev 	if (wb < adev->wb.num_wb)
651*b843c749SSergey Zigachev 		__clear_bit(wb, adev->wb.used);
652*b843c749SSergey Zigachev }
653*b843c749SSergey Zigachev 
654*b843c749SSergey Zigachev /**
655*b843c749SSergey Zigachev  * amdgpu_device_vram_location - try to find VRAM location
656*b843c749SSergey Zigachev  *
657*b843c749SSergey Zigachev  * @adev: amdgpu device structure holding all necessary informations
658*b843c749SSergey Zigachev  * @mc: memory controller structure holding memory informations
659*b843c749SSergey Zigachev  * @base: base address at which to put VRAM
660*b843c749SSergey Zigachev  *
661*b843c749SSergey Zigachev  * Function will try to place VRAM at base address provided
662*b843c749SSergey Zigachev  * as parameter.
663*b843c749SSergey Zigachev  */
664*b843c749SSergey Zigachev void amdgpu_device_vram_location(struct amdgpu_device *adev,
665*b843c749SSergey Zigachev 				 struct amdgpu_gmc *mc, u64 base)
666*b843c749SSergey Zigachev {
667*b843c749SSergey Zigachev 	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
668*b843c749SSergey Zigachev 
669*b843c749SSergey Zigachev 	mc->vram_start = base;
670*b843c749SSergey Zigachev 	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
671*b843c749SSergey Zigachev 	if (limit && limit < mc->real_vram_size)
672*b843c749SSergey Zigachev 		mc->real_vram_size = limit;
673*b843c749SSergey Zigachev 	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
674*b843c749SSergey Zigachev 			mc->mc_vram_size >> 20, mc->vram_start,
675*b843c749SSergey Zigachev 			mc->vram_end, mc->real_vram_size >> 20);
676*b843c749SSergey Zigachev }
677*b843c749SSergey Zigachev 
678*b843c749SSergey Zigachev /**
679*b843c749SSergey Zigachev  * amdgpu_device_gart_location - try to find GART location
680*b843c749SSergey Zigachev  *
681*b843c749SSergey Zigachev  * @adev: amdgpu device structure holding all necessary informations
682*b843c749SSergey Zigachev  * @mc: memory controller structure holding memory informations
683*b843c749SSergey Zigachev  *
684*b843c749SSergey Zigachev  * Function will place try to place GART before or after VRAM.
685*b843c749SSergey Zigachev  *
686*b843c749SSergey Zigachev  * If GART size is bigger than space left then we ajust GART size.
687*b843c749SSergey Zigachev  * Thus function will never fails.
688*b843c749SSergey Zigachev  */
689*b843c749SSergey Zigachev void amdgpu_device_gart_location(struct amdgpu_device *adev,
690*b843c749SSergey Zigachev 				 struct amdgpu_gmc *mc)
691*b843c749SSergey Zigachev {
692*b843c749SSergey Zigachev 	u64 size_af, size_bf;
693*b843c749SSergey Zigachev 
694*b843c749SSergey Zigachev 	mc->gart_size += adev->pm.smu_prv_buffer_size;
695*b843c749SSergey Zigachev 
696*b843c749SSergey Zigachev 	size_af = adev->gmc.mc_mask - mc->vram_end;
697*b843c749SSergey Zigachev 	size_bf = mc->vram_start;
698*b843c749SSergey Zigachev 	if (size_bf > size_af) {
699*b843c749SSergey Zigachev 		if (mc->gart_size > size_bf) {
700*b843c749SSergey Zigachev 			dev_warn(adev->dev, "limiting GART\n");
701*b843c749SSergey Zigachev 			mc->gart_size = size_bf;
702*b843c749SSergey Zigachev 		}
703*b843c749SSergey Zigachev 		mc->gart_start = 0;
704*b843c749SSergey Zigachev 	} else {
705*b843c749SSergey Zigachev 		if (mc->gart_size > size_af) {
706*b843c749SSergey Zigachev 			dev_warn(adev->dev, "limiting GART\n");
707*b843c749SSergey Zigachev 			mc->gart_size = size_af;
708*b843c749SSergey Zigachev 		}
709*b843c749SSergey Zigachev 		/* VCE doesn't like it when BOs cross a 4GB segment, so align
710*b843c749SSergey Zigachev 		 * the GART base on a 4GB boundary as well.
711*b843c749SSergey Zigachev 		 */
712*b843c749SSergey Zigachev 		mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
713*b843c749SSergey Zigachev 	}
714*b843c749SSergey Zigachev 	mc->gart_end = mc->gart_start + mc->gart_size - 1;
715*b843c749SSergey Zigachev 	dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
716*b843c749SSergey Zigachev 			mc->gart_size >> 20, mc->gart_start, mc->gart_end);
717*b843c749SSergey Zigachev }
718*b843c749SSergey Zigachev 
719*b843c749SSergey Zigachev /**
720*b843c749SSergey Zigachev  * amdgpu_device_resize_fb_bar - try to resize FB BAR
721*b843c749SSergey Zigachev  *
722*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
723*b843c749SSergey Zigachev  *
724*b843c749SSergey Zigachev  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
725*b843c749SSergey Zigachev  * to fail, but if any of the BARs is not accessible after the size we abort
726*b843c749SSergey Zigachev  * driver loading by returning -ENODEV.
727*b843c749SSergey Zigachev  */
728*b843c749SSergey Zigachev int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
729*b843c749SSergey Zigachev {
730*b843c749SSergey Zigachev 	u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
731*b843c749SSergey Zigachev 	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
732*b843c749SSergey Zigachev 	struct pci_bus *root;
733*b843c749SSergey Zigachev 	struct resource *res;
734*b843c749SSergey Zigachev 	unsigned i;
735*b843c749SSergey Zigachev 	u16 cmd;
736*b843c749SSergey Zigachev 	int r;
737*b843c749SSergey Zigachev 
738*b843c749SSergey Zigachev 	/* Bypass for VF */
739*b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev))
740*b843c749SSergey Zigachev 		return 0;
741*b843c749SSergey Zigachev 
742*b843c749SSergey Zigachev 	/* Check if the root BUS has 64bit memory resources */
743*b843c749SSergey Zigachev 	root = adev->pdev->bus;
744*b843c749SSergey Zigachev 	while (root->parent)
745*b843c749SSergey Zigachev 		root = root->parent;
746*b843c749SSergey Zigachev 
747*b843c749SSergey Zigachev 	pci_bus_for_each_resource(root, res, i) {
748*b843c749SSergey Zigachev 		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
749*b843c749SSergey Zigachev 		    res->start > 0x100000000ull)
750*b843c749SSergey Zigachev 			break;
751*b843c749SSergey Zigachev 	}
752*b843c749SSergey Zigachev 
753*b843c749SSergey Zigachev 	/* Trying to resize is pointless without a root hub window above 4GB */
754*b843c749SSergey Zigachev 	if (!res)
755*b843c749SSergey Zigachev 		return 0;
756*b843c749SSergey Zigachev 
757*b843c749SSergey Zigachev 	/* Disable memory decoding while we change the BAR addresses and size */
758*b843c749SSergey Zigachev 	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
759*b843c749SSergey Zigachev 	pci_write_config_word(adev->pdev, PCI_COMMAND,
760*b843c749SSergey Zigachev 			      cmd & ~PCI_COMMAND_MEMORY);
761*b843c749SSergey Zigachev 
762*b843c749SSergey Zigachev 	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
763*b843c749SSergey Zigachev 	amdgpu_device_doorbell_fini(adev);
764*b843c749SSergey Zigachev 	if (adev->asic_type >= CHIP_BONAIRE)
765*b843c749SSergey Zigachev 		pci_release_resource(adev->pdev, 2);
766*b843c749SSergey Zigachev 
767*b843c749SSergey Zigachev 	pci_release_resource(adev->pdev, 0);
768*b843c749SSergey Zigachev 
769*b843c749SSergey Zigachev 	r = pci_resize_resource(adev->pdev, 0, rbar_size);
770*b843c749SSergey Zigachev 	if (r == -ENOSPC)
771*b843c749SSergey Zigachev 		DRM_INFO("Not enough PCI address space for a large BAR.");
772*b843c749SSergey Zigachev 	else if (r && r != -ENOTSUPP)
773*b843c749SSergey Zigachev 		DRM_ERROR("Problem resizing BAR0 (%d).", r);
774*b843c749SSergey Zigachev 
775*b843c749SSergey Zigachev 	pci_assign_unassigned_bus_resources(adev->pdev->bus);
776*b843c749SSergey Zigachev 
777*b843c749SSergey Zigachev 	/* When the doorbell or fb BAR isn't available we have no chance of
778*b843c749SSergey Zigachev 	 * using the device.
779*b843c749SSergey Zigachev 	 */
780*b843c749SSergey Zigachev 	r = amdgpu_device_doorbell_init(adev);
781*b843c749SSergey Zigachev 	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
782*b843c749SSergey Zigachev 		return -ENODEV;
783*b843c749SSergey Zigachev 
784*b843c749SSergey Zigachev 	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
785*b843c749SSergey Zigachev 
786*b843c749SSergey Zigachev 	return 0;
787*b843c749SSergey Zigachev }
788*b843c749SSergey Zigachev 
789*b843c749SSergey Zigachev /*
790*b843c749SSergey Zigachev  * GPU helpers function.
791*b843c749SSergey Zigachev  */
792*b843c749SSergey Zigachev /**
793*b843c749SSergey Zigachev  * amdgpu_device_need_post - check if the hw need post or not
794*b843c749SSergey Zigachev  *
795*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
796*b843c749SSergey Zigachev  *
797*b843c749SSergey Zigachev  * Check if the asic has been initialized (all asics) at driver startup
798*b843c749SSergey Zigachev  * or post is needed if  hw reset is performed.
799*b843c749SSergey Zigachev  * Returns true if need or false if not.
800*b843c749SSergey Zigachev  */
801*b843c749SSergey Zigachev bool amdgpu_device_need_post(struct amdgpu_device *adev)
802*b843c749SSergey Zigachev {
803*b843c749SSergey Zigachev 	uint32_t reg;
804*b843c749SSergey Zigachev 
805*b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev))
806*b843c749SSergey Zigachev 		return false;
807*b843c749SSergey Zigachev 
808*b843c749SSergey Zigachev 	if (amdgpu_passthrough(adev)) {
809*b843c749SSergey Zigachev 		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
810*b843c749SSergey Zigachev 		 * some old smc fw still need driver do vPost otherwise gpu hang, while
811*b843c749SSergey Zigachev 		 * those smc fw version above 22.15 doesn't have this flaw, so we force
812*b843c749SSergey Zigachev 		 * vpost executed for smc version below 22.15
813*b843c749SSergey Zigachev 		 */
814*b843c749SSergey Zigachev 		if (adev->asic_type == CHIP_FIJI) {
815*b843c749SSergey Zigachev 			int err;
816*b843c749SSergey Zigachev 			uint32_t fw_ver;
817*b843c749SSergey Zigachev 			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
818*b843c749SSergey Zigachev 			/* force vPost if error occured */
819*b843c749SSergey Zigachev 			if (err)
820*b843c749SSergey Zigachev 				return true;
821*b843c749SSergey Zigachev 
822*b843c749SSergey Zigachev 			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
823*b843c749SSergey Zigachev 			if (fw_ver < 0x00160e00)
824*b843c749SSergey Zigachev 				return true;
825*b843c749SSergey Zigachev 		}
826*b843c749SSergey Zigachev 	}
827*b843c749SSergey Zigachev 
828*b843c749SSergey Zigachev 	if (adev->has_hw_reset) {
829*b843c749SSergey Zigachev 		adev->has_hw_reset = false;
830*b843c749SSergey Zigachev 		return true;
831*b843c749SSergey Zigachev 	}
832*b843c749SSergey Zigachev 
833*b843c749SSergey Zigachev 	/* bios scratch used on CIK+ */
834*b843c749SSergey Zigachev 	if (adev->asic_type >= CHIP_BONAIRE)
835*b843c749SSergey Zigachev 		return amdgpu_atombios_scratch_need_asic_init(adev);
836*b843c749SSergey Zigachev 
837*b843c749SSergey Zigachev 	/* check MEM_SIZE for older asics */
838*b843c749SSergey Zigachev 	reg = amdgpu_asic_get_config_memsize(adev);
839*b843c749SSergey Zigachev 
840*b843c749SSergey Zigachev 	if ((reg != 0) && (reg != 0xffffffff))
841*b843c749SSergey Zigachev 		return false;
842*b843c749SSergey Zigachev 
843*b843c749SSergey Zigachev 	return true;
844*b843c749SSergey Zigachev }
845*b843c749SSergey Zigachev 
846*b843c749SSergey Zigachev /* if we get transitioned to only one device, take VGA back */
847*b843c749SSergey Zigachev /**
848*b843c749SSergey Zigachev  * amdgpu_device_vga_set_decode - enable/disable vga decode
849*b843c749SSergey Zigachev  *
850*b843c749SSergey Zigachev  * @cookie: amdgpu_device pointer
851*b843c749SSergey Zigachev  * @state: enable/disable vga decode
852*b843c749SSergey Zigachev  *
853*b843c749SSergey Zigachev  * Enable/disable vga decode (all asics).
854*b843c749SSergey Zigachev  * Returns VGA resource flags.
855*b843c749SSergey Zigachev  */
856*b843c749SSergey Zigachev static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
857*b843c749SSergey Zigachev {
858*b843c749SSergey Zigachev 	struct amdgpu_device *adev = cookie;
859*b843c749SSergey Zigachev 	amdgpu_asic_set_vga_state(adev, state);
860*b843c749SSergey Zigachev 	if (state)
861*b843c749SSergey Zigachev 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
862*b843c749SSergey Zigachev 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
863*b843c749SSergey Zigachev 	else
864*b843c749SSergey Zigachev 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
865*b843c749SSergey Zigachev }
866*b843c749SSergey Zigachev 
867*b843c749SSergey Zigachev /**
868*b843c749SSergey Zigachev  * amdgpu_device_check_block_size - validate the vm block size
869*b843c749SSergey Zigachev  *
870*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
871*b843c749SSergey Zigachev  *
872*b843c749SSergey Zigachev  * Validates the vm block size specified via module parameter.
873*b843c749SSergey Zigachev  * The vm block size defines number of bits in page table versus page directory,
874*b843c749SSergey Zigachev  * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
875*b843c749SSergey Zigachev  * page table and the remaining bits are in the page directory.
876*b843c749SSergey Zigachev  */
877*b843c749SSergey Zigachev static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
878*b843c749SSergey Zigachev {
879*b843c749SSergey Zigachev 	/* defines number of bits in page table versus page directory,
880*b843c749SSergey Zigachev 	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
881*b843c749SSergey Zigachev 	 * page table and the remaining bits are in the page directory */
882*b843c749SSergey Zigachev 	if (amdgpu_vm_block_size == -1)
883*b843c749SSergey Zigachev 		return;
884*b843c749SSergey Zigachev 
885*b843c749SSergey Zigachev 	if (amdgpu_vm_block_size < 9) {
886*b843c749SSergey Zigachev 		dev_warn(adev->dev, "VM page table size (%d) too small\n",
887*b843c749SSergey Zigachev 			 amdgpu_vm_block_size);
888*b843c749SSergey Zigachev 		amdgpu_vm_block_size = -1;
889*b843c749SSergey Zigachev 	}
890*b843c749SSergey Zigachev }
891*b843c749SSergey Zigachev 
892*b843c749SSergey Zigachev /**
893*b843c749SSergey Zigachev  * amdgpu_device_check_vm_size - validate the vm size
894*b843c749SSergey Zigachev  *
895*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
896*b843c749SSergey Zigachev  *
897*b843c749SSergey Zigachev  * Validates the vm size in GB specified via module parameter.
898*b843c749SSergey Zigachev  * The VM size is the size of the GPU virtual memory space in GB.
899*b843c749SSergey Zigachev  */
900*b843c749SSergey Zigachev static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
901*b843c749SSergey Zigachev {
902*b843c749SSergey Zigachev 	/* no need to check the default value */
903*b843c749SSergey Zigachev 	if (amdgpu_vm_size == -1)
904*b843c749SSergey Zigachev 		return;
905*b843c749SSergey Zigachev 
906*b843c749SSergey Zigachev 	if (amdgpu_vm_size < 1) {
907*b843c749SSergey Zigachev 		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
908*b843c749SSergey Zigachev 			 amdgpu_vm_size);
909*b843c749SSergey Zigachev 		amdgpu_vm_size = -1;
910*b843c749SSergey Zigachev 	}
911*b843c749SSergey Zigachev }
912*b843c749SSergey Zigachev 
913*b843c749SSergey Zigachev static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
914*b843c749SSergey Zigachev {
915*b843c749SSergey Zigachev 	struct sysinfo si;
916*b843c749SSergey Zigachev 	bool is_os_64 = (sizeof(void *) == 8) ? true : false;
917*b843c749SSergey Zigachev 	uint64_t total_memory;
918*b843c749SSergey Zigachev 	uint64_t dram_size_seven_GB = 0x1B8000000;
919*b843c749SSergey Zigachev 	uint64_t dram_size_three_GB = 0xB8000000;
920*b843c749SSergey Zigachev 
921*b843c749SSergey Zigachev 	if (amdgpu_smu_memory_pool_size == 0)
922*b843c749SSergey Zigachev 		return;
923*b843c749SSergey Zigachev 
924*b843c749SSergey Zigachev 	if (!is_os_64) {
925*b843c749SSergey Zigachev 		DRM_WARN("Not 64-bit OS, feature not supported\n");
926*b843c749SSergey Zigachev 		goto def_value;
927*b843c749SSergey Zigachev 	}
928*b843c749SSergey Zigachev 	si_meminfo(&si);
929*b843c749SSergey Zigachev 	total_memory = (uint64_t)si.totalram * si.mem_unit;
930*b843c749SSergey Zigachev 
931*b843c749SSergey Zigachev 	if ((amdgpu_smu_memory_pool_size == 1) ||
932*b843c749SSergey Zigachev 		(amdgpu_smu_memory_pool_size == 2)) {
933*b843c749SSergey Zigachev 		if (total_memory < dram_size_three_GB)
934*b843c749SSergey Zigachev 			goto def_value1;
935*b843c749SSergey Zigachev 	} else if ((amdgpu_smu_memory_pool_size == 4) ||
936*b843c749SSergey Zigachev 		(amdgpu_smu_memory_pool_size == 8)) {
937*b843c749SSergey Zigachev 		if (total_memory < dram_size_seven_GB)
938*b843c749SSergey Zigachev 			goto def_value1;
939*b843c749SSergey Zigachev 	} else {
940*b843c749SSergey Zigachev 		DRM_WARN("Smu memory pool size not supported\n");
941*b843c749SSergey Zigachev 		goto def_value;
942*b843c749SSergey Zigachev 	}
943*b843c749SSergey Zigachev 	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
944*b843c749SSergey Zigachev 
945*b843c749SSergey Zigachev 	return;
946*b843c749SSergey Zigachev 
947*b843c749SSergey Zigachev def_value1:
948*b843c749SSergey Zigachev 	DRM_WARN("No enough system memory\n");
949*b843c749SSergey Zigachev def_value:
950*b843c749SSergey Zigachev 	adev->pm.smu_prv_buffer_size = 0;
951*b843c749SSergey Zigachev }
952*b843c749SSergey Zigachev 
953*b843c749SSergey Zigachev /**
954*b843c749SSergey Zigachev  * amdgpu_device_check_arguments - validate module params
955*b843c749SSergey Zigachev  *
956*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
957*b843c749SSergey Zigachev  *
958*b843c749SSergey Zigachev  * Validates certain module parameters and updates
959*b843c749SSergey Zigachev  * the associated values used by the driver (all asics).
960*b843c749SSergey Zigachev  */
961*b843c749SSergey Zigachev static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
962*b843c749SSergey Zigachev {
963*b843c749SSergey Zigachev 	if (amdgpu_sched_jobs < 4) {
964*b843c749SSergey Zigachev 		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
965*b843c749SSergey Zigachev 			 amdgpu_sched_jobs);
966*b843c749SSergey Zigachev 		amdgpu_sched_jobs = 4;
967*b843c749SSergey Zigachev 	} else if (!is_power_of_2(amdgpu_sched_jobs)){
968*b843c749SSergey Zigachev 		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
969*b843c749SSergey Zigachev 			 amdgpu_sched_jobs);
970*b843c749SSergey Zigachev 		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
971*b843c749SSergey Zigachev 	}
972*b843c749SSergey Zigachev 
973*b843c749SSergey Zigachev 	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
974*b843c749SSergey Zigachev 		/* gart size must be greater or equal to 32M */
975*b843c749SSergey Zigachev 		dev_warn(adev->dev, "gart size (%d) too small\n",
976*b843c749SSergey Zigachev 			 amdgpu_gart_size);
977*b843c749SSergey Zigachev 		amdgpu_gart_size = -1;
978*b843c749SSergey Zigachev 	}
979*b843c749SSergey Zigachev 
980*b843c749SSergey Zigachev 	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
981*b843c749SSergey Zigachev 		/* gtt size must be greater or equal to 32M */
982*b843c749SSergey Zigachev 		dev_warn(adev->dev, "gtt size (%d) too small\n",
983*b843c749SSergey Zigachev 				 amdgpu_gtt_size);
984*b843c749SSergey Zigachev 		amdgpu_gtt_size = -1;
985*b843c749SSergey Zigachev 	}
986*b843c749SSergey Zigachev 
987*b843c749SSergey Zigachev 	/* valid range is between 4 and 9 inclusive */
988*b843c749SSergey Zigachev 	if (amdgpu_vm_fragment_size != -1 &&
989*b843c749SSergey Zigachev 	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
990*b843c749SSergey Zigachev 		dev_warn(adev->dev, "valid range is between 4 and 9\n");
991*b843c749SSergey Zigachev 		amdgpu_vm_fragment_size = -1;
992*b843c749SSergey Zigachev 	}
993*b843c749SSergey Zigachev 
994*b843c749SSergey Zigachev 	amdgpu_device_check_smu_prv_buffer_size(adev);
995*b843c749SSergey Zigachev 
996*b843c749SSergey Zigachev 	amdgpu_device_check_vm_size(adev);
997*b843c749SSergey Zigachev 
998*b843c749SSergey Zigachev 	amdgpu_device_check_block_size(adev);
999*b843c749SSergey Zigachev 
1000*b843c749SSergey Zigachev 	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
1001*b843c749SSergey Zigachev 	    !is_power_of_2(amdgpu_vram_page_split))) {
1002*b843c749SSergey Zigachev 		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
1003*b843c749SSergey Zigachev 			 amdgpu_vram_page_split);
1004*b843c749SSergey Zigachev 		amdgpu_vram_page_split = 1024;
1005*b843c749SSergey Zigachev 	}
1006*b843c749SSergey Zigachev 
1007*b843c749SSergey Zigachev 	if (amdgpu_lockup_timeout == 0) {
1008*b843c749SSergey Zigachev 		dev_warn(adev->dev, "lockup_timeout msut be > 0, adjusting to 10000\n");
1009*b843c749SSergey Zigachev 		amdgpu_lockup_timeout = 10000;
1010*b843c749SSergey Zigachev 	}
1011*b843c749SSergey Zigachev 
1012*b843c749SSergey Zigachev 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1013*b843c749SSergey Zigachev }
1014*b843c749SSergey Zigachev 
1015*b843c749SSergey Zigachev /**
1016*b843c749SSergey Zigachev  * amdgpu_switcheroo_set_state - set switcheroo state
1017*b843c749SSergey Zigachev  *
1018*b843c749SSergey Zigachev  * @pdev: pci dev pointer
1019*b843c749SSergey Zigachev  * @state: vga_switcheroo state
1020*b843c749SSergey Zigachev  *
1021*b843c749SSergey Zigachev  * Callback for the switcheroo driver.  Suspends or resumes the
1022*b843c749SSergey Zigachev  * the asics before or after it is powered up using ACPI methods.
1023*b843c749SSergey Zigachev  */
1024*b843c749SSergey Zigachev static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1025*b843c749SSergey Zigachev {
1026*b843c749SSergey Zigachev 	struct drm_device *dev = pci_get_drvdata(pdev);
1027*b843c749SSergey Zigachev 
1028*b843c749SSergey Zigachev 	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1029*b843c749SSergey Zigachev 		return;
1030*b843c749SSergey Zigachev 
1031*b843c749SSergey Zigachev 	if (state == VGA_SWITCHEROO_ON) {
1032*b843c749SSergey Zigachev 		pr_info("amdgpu: switched on\n");
1033*b843c749SSergey Zigachev 		/* don't suspend or resume card normally */
1034*b843c749SSergey Zigachev 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1035*b843c749SSergey Zigachev 
1036*b843c749SSergey Zigachev 		amdgpu_device_resume(dev, true, true);
1037*b843c749SSergey Zigachev 
1038*b843c749SSergey Zigachev 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1039*b843c749SSergey Zigachev 		drm_kms_helper_poll_enable(dev);
1040*b843c749SSergey Zigachev 	} else {
1041*b843c749SSergey Zigachev 		pr_info("amdgpu: switched off\n");
1042*b843c749SSergey Zigachev 		drm_kms_helper_poll_disable(dev);
1043*b843c749SSergey Zigachev 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1044*b843c749SSergey Zigachev 		amdgpu_device_suspend(dev, true, true);
1045*b843c749SSergey Zigachev 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1046*b843c749SSergey Zigachev 	}
1047*b843c749SSergey Zigachev }
1048*b843c749SSergey Zigachev 
1049*b843c749SSergey Zigachev /**
1050*b843c749SSergey Zigachev  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1051*b843c749SSergey Zigachev  *
1052*b843c749SSergey Zigachev  * @pdev: pci dev pointer
1053*b843c749SSergey Zigachev  *
1054*b843c749SSergey Zigachev  * Callback for the switcheroo driver.  Check of the switcheroo
1055*b843c749SSergey Zigachev  * state can be changed.
1056*b843c749SSergey Zigachev  * Returns true if the state can be changed, false if not.
1057*b843c749SSergey Zigachev  */
1058*b843c749SSergey Zigachev static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1059*b843c749SSergey Zigachev {
1060*b843c749SSergey Zigachev 	struct drm_device *dev = pci_get_drvdata(pdev);
1061*b843c749SSergey Zigachev 
1062*b843c749SSergey Zigachev 	/*
1063*b843c749SSergey Zigachev 	* FIXME: open_count is protected by drm_global_mutex but that would lead to
1064*b843c749SSergey Zigachev 	* locking inversion with the driver load path. And the access here is
1065*b843c749SSergey Zigachev 	* completely racy anyway. So don't bother with locking for now.
1066*b843c749SSergey Zigachev 	*/
1067*b843c749SSergey Zigachev 	return dev->open_count == 0;
1068*b843c749SSergey Zigachev }
1069*b843c749SSergey Zigachev 
1070*b843c749SSergey Zigachev static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1071*b843c749SSergey Zigachev 	.set_gpu_state = amdgpu_switcheroo_set_state,
1072*b843c749SSergey Zigachev 	.reprobe = NULL,
1073*b843c749SSergey Zigachev 	.can_switch = amdgpu_switcheroo_can_switch,
1074*b843c749SSergey Zigachev };
1075*b843c749SSergey Zigachev 
1076*b843c749SSergey Zigachev /**
1077*b843c749SSergey Zigachev  * amdgpu_device_ip_set_clockgating_state - set the CG state
1078*b843c749SSergey Zigachev  *
1079*b843c749SSergey Zigachev  * @dev: amdgpu_device pointer
1080*b843c749SSergey Zigachev  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1081*b843c749SSergey Zigachev  * @state: clockgating state (gate or ungate)
1082*b843c749SSergey Zigachev  *
1083*b843c749SSergey Zigachev  * Sets the requested clockgating state for all instances of
1084*b843c749SSergey Zigachev  * the hardware IP specified.
1085*b843c749SSergey Zigachev  * Returns the error code from the last instance.
1086*b843c749SSergey Zigachev  */
1087*b843c749SSergey Zigachev int amdgpu_device_ip_set_clockgating_state(void *dev,
1088*b843c749SSergey Zigachev 					   enum amd_ip_block_type block_type,
1089*b843c749SSergey Zigachev 					   enum amd_clockgating_state state)
1090*b843c749SSergey Zigachev {
1091*b843c749SSergey Zigachev 	struct amdgpu_device *adev = dev;
1092*b843c749SSergey Zigachev 	int i, r = 0;
1093*b843c749SSergey Zigachev 
1094*b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
1095*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
1096*b843c749SSergey Zigachev 			continue;
1097*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type != block_type)
1098*b843c749SSergey Zigachev 			continue;
1099*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1100*b843c749SSergey Zigachev 			continue;
1101*b843c749SSergey Zigachev 		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1102*b843c749SSergey Zigachev 			(void *)adev, state);
1103*b843c749SSergey Zigachev 		if (r)
1104*b843c749SSergey Zigachev 			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1105*b843c749SSergey Zigachev 				  adev->ip_blocks[i].version->funcs->name, r);
1106*b843c749SSergey Zigachev 	}
1107*b843c749SSergey Zigachev 	return r;
1108*b843c749SSergey Zigachev }
1109*b843c749SSergey Zigachev 
1110*b843c749SSergey Zigachev /**
1111*b843c749SSergey Zigachev  * amdgpu_device_ip_set_powergating_state - set the PG state
1112*b843c749SSergey Zigachev  *
1113*b843c749SSergey Zigachev  * @dev: amdgpu_device pointer
1114*b843c749SSergey Zigachev  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1115*b843c749SSergey Zigachev  * @state: powergating state (gate or ungate)
1116*b843c749SSergey Zigachev  *
1117*b843c749SSergey Zigachev  * Sets the requested powergating state for all instances of
1118*b843c749SSergey Zigachev  * the hardware IP specified.
1119*b843c749SSergey Zigachev  * Returns the error code from the last instance.
1120*b843c749SSergey Zigachev  */
1121*b843c749SSergey Zigachev int amdgpu_device_ip_set_powergating_state(void *dev,
1122*b843c749SSergey Zigachev 					   enum amd_ip_block_type block_type,
1123*b843c749SSergey Zigachev 					   enum amd_powergating_state state)
1124*b843c749SSergey Zigachev {
1125*b843c749SSergey Zigachev 	struct amdgpu_device *adev = dev;
1126*b843c749SSergey Zigachev 	int i, r = 0;
1127*b843c749SSergey Zigachev 
1128*b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
1129*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
1130*b843c749SSergey Zigachev 			continue;
1131*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type != block_type)
1132*b843c749SSergey Zigachev 			continue;
1133*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1134*b843c749SSergey Zigachev 			continue;
1135*b843c749SSergey Zigachev 		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1136*b843c749SSergey Zigachev 			(void *)adev, state);
1137*b843c749SSergey Zigachev 		if (r)
1138*b843c749SSergey Zigachev 			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1139*b843c749SSergey Zigachev 				  adev->ip_blocks[i].version->funcs->name, r);
1140*b843c749SSergey Zigachev 	}
1141*b843c749SSergey Zigachev 	return r;
1142*b843c749SSergey Zigachev }
1143*b843c749SSergey Zigachev 
1144*b843c749SSergey Zigachev /**
1145*b843c749SSergey Zigachev  * amdgpu_device_ip_get_clockgating_state - get the CG state
1146*b843c749SSergey Zigachev  *
1147*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1148*b843c749SSergey Zigachev  * @flags: clockgating feature flags
1149*b843c749SSergey Zigachev  *
1150*b843c749SSergey Zigachev  * Walks the list of IPs on the device and updates the clockgating
1151*b843c749SSergey Zigachev  * flags for each IP.
1152*b843c749SSergey Zigachev  * Updates @flags with the feature flags for each hardware IP where
1153*b843c749SSergey Zigachev  * clockgating is enabled.
1154*b843c749SSergey Zigachev  */
1155*b843c749SSergey Zigachev void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1156*b843c749SSergey Zigachev 					    u32 *flags)
1157*b843c749SSergey Zigachev {
1158*b843c749SSergey Zigachev 	int i;
1159*b843c749SSergey Zigachev 
1160*b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
1161*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
1162*b843c749SSergey Zigachev 			continue;
1163*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1164*b843c749SSergey Zigachev 			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1165*b843c749SSergey Zigachev 	}
1166*b843c749SSergey Zigachev }
1167*b843c749SSergey Zigachev 
1168*b843c749SSergey Zigachev /**
1169*b843c749SSergey Zigachev  * amdgpu_device_ip_wait_for_idle - wait for idle
1170*b843c749SSergey Zigachev  *
1171*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1172*b843c749SSergey Zigachev  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1173*b843c749SSergey Zigachev  *
1174*b843c749SSergey Zigachev  * Waits for the request hardware IP to be idle.
1175*b843c749SSergey Zigachev  * Returns 0 for success or a negative error code on failure.
1176*b843c749SSergey Zigachev  */
1177*b843c749SSergey Zigachev int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1178*b843c749SSergey Zigachev 				   enum amd_ip_block_type block_type)
1179*b843c749SSergey Zigachev {
1180*b843c749SSergey Zigachev 	int i, r;
1181*b843c749SSergey Zigachev 
1182*b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
1183*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
1184*b843c749SSergey Zigachev 			continue;
1185*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type == block_type) {
1186*b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1187*b843c749SSergey Zigachev 			if (r)
1188*b843c749SSergey Zigachev 				return r;
1189*b843c749SSergey Zigachev 			break;
1190*b843c749SSergey Zigachev 		}
1191*b843c749SSergey Zigachev 	}
1192*b843c749SSergey Zigachev 	return 0;
1193*b843c749SSergey Zigachev 
1194*b843c749SSergey Zigachev }
1195*b843c749SSergey Zigachev 
1196*b843c749SSergey Zigachev /**
1197*b843c749SSergey Zigachev  * amdgpu_device_ip_is_idle - is the hardware IP idle
1198*b843c749SSergey Zigachev  *
1199*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1200*b843c749SSergey Zigachev  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1201*b843c749SSergey Zigachev  *
1202*b843c749SSergey Zigachev  * Check if the hardware IP is idle or not.
1203*b843c749SSergey Zigachev  * Returns true if it the IP is idle, false if not.
1204*b843c749SSergey Zigachev  */
1205*b843c749SSergey Zigachev bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1206*b843c749SSergey Zigachev 			      enum amd_ip_block_type block_type)
1207*b843c749SSergey Zigachev {
1208*b843c749SSergey Zigachev 	int i;
1209*b843c749SSergey Zigachev 
1210*b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
1211*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
1212*b843c749SSergey Zigachev 			continue;
1213*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type == block_type)
1214*b843c749SSergey Zigachev 			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1215*b843c749SSergey Zigachev 	}
1216*b843c749SSergey Zigachev 	return true;
1217*b843c749SSergey Zigachev 
1218*b843c749SSergey Zigachev }
1219*b843c749SSergey Zigachev 
1220*b843c749SSergey Zigachev /**
1221*b843c749SSergey Zigachev  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1222*b843c749SSergey Zigachev  *
1223*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1224*b843c749SSergey Zigachev  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1225*b843c749SSergey Zigachev  *
1226*b843c749SSergey Zigachev  * Returns a pointer to the hardware IP block structure
1227*b843c749SSergey Zigachev  * if it exists for the asic, otherwise NULL.
1228*b843c749SSergey Zigachev  */
1229*b843c749SSergey Zigachev struct amdgpu_ip_block *
1230*b843c749SSergey Zigachev amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1231*b843c749SSergey Zigachev 			      enum amd_ip_block_type type)
1232*b843c749SSergey Zigachev {
1233*b843c749SSergey Zigachev 	int i;
1234*b843c749SSergey Zigachev 
1235*b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++)
1236*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type == type)
1237*b843c749SSergey Zigachev 			return &adev->ip_blocks[i];
1238*b843c749SSergey Zigachev 
1239*b843c749SSergey Zigachev 	return NULL;
1240*b843c749SSergey Zigachev }
1241*b843c749SSergey Zigachev 
1242*b843c749SSergey Zigachev /**
1243*b843c749SSergey Zigachev  * amdgpu_device_ip_block_version_cmp
1244*b843c749SSergey Zigachev  *
1245*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1246*b843c749SSergey Zigachev  * @type: enum amd_ip_block_type
1247*b843c749SSergey Zigachev  * @major: major version
1248*b843c749SSergey Zigachev  * @minor: minor version
1249*b843c749SSergey Zigachev  *
1250*b843c749SSergey Zigachev  * return 0 if equal or greater
1251*b843c749SSergey Zigachev  * return 1 if smaller or the ip_block doesn't exist
1252*b843c749SSergey Zigachev  */
1253*b843c749SSergey Zigachev int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1254*b843c749SSergey Zigachev 				       enum amd_ip_block_type type,
1255*b843c749SSergey Zigachev 				       u32 major, u32 minor)
1256*b843c749SSergey Zigachev {
1257*b843c749SSergey Zigachev 	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1258*b843c749SSergey Zigachev 
1259*b843c749SSergey Zigachev 	if (ip_block && ((ip_block->version->major > major) ||
1260*b843c749SSergey Zigachev 			((ip_block->version->major == major) &&
1261*b843c749SSergey Zigachev 			(ip_block->version->minor >= minor))))
1262*b843c749SSergey Zigachev 		return 0;
1263*b843c749SSergey Zigachev 
1264*b843c749SSergey Zigachev 	return 1;
1265*b843c749SSergey Zigachev }
1266*b843c749SSergey Zigachev 
1267*b843c749SSergey Zigachev /**
1268*b843c749SSergey Zigachev  * amdgpu_device_ip_block_add
1269*b843c749SSergey Zigachev  *
1270*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1271*b843c749SSergey Zigachev  * @ip_block_version: pointer to the IP to add
1272*b843c749SSergey Zigachev  *
1273*b843c749SSergey Zigachev  * Adds the IP block driver information to the collection of IPs
1274*b843c749SSergey Zigachev  * on the asic.
1275*b843c749SSergey Zigachev  */
1276*b843c749SSergey Zigachev int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1277*b843c749SSergey Zigachev 			       const struct amdgpu_ip_block_version *ip_block_version)
1278*b843c749SSergey Zigachev {
1279*b843c749SSergey Zigachev 	if (!ip_block_version)
1280*b843c749SSergey Zigachev 		return -EINVAL;
1281*b843c749SSergey Zigachev 
1282*b843c749SSergey Zigachev 	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1283*b843c749SSergey Zigachev 		  ip_block_version->funcs->name);
1284*b843c749SSergey Zigachev 
1285*b843c749SSergey Zigachev 	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1286*b843c749SSergey Zigachev 
1287*b843c749SSergey Zigachev 	return 0;
1288*b843c749SSergey Zigachev }
1289*b843c749SSergey Zigachev 
1290*b843c749SSergey Zigachev /**
1291*b843c749SSergey Zigachev  * amdgpu_device_enable_virtual_display - enable virtual display feature
1292*b843c749SSergey Zigachev  *
1293*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1294*b843c749SSergey Zigachev  *
1295*b843c749SSergey Zigachev  * Enabled the virtual display feature if the user has enabled it via
1296*b843c749SSergey Zigachev  * the module parameter virtual_display.  This feature provides a virtual
1297*b843c749SSergey Zigachev  * display hardware on headless boards or in virtualized environments.
1298*b843c749SSergey Zigachev  * This function parses and validates the configuration string specified by
1299*b843c749SSergey Zigachev  * the user and configues the virtual display configuration (number of
1300*b843c749SSergey Zigachev  * virtual connectors, crtcs, etc.) specified.
1301*b843c749SSergey Zigachev  */
1302*b843c749SSergey Zigachev static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1303*b843c749SSergey Zigachev {
1304*b843c749SSergey Zigachev 	adev->enable_virtual_display = false;
1305*b843c749SSergey Zigachev 
1306*b843c749SSergey Zigachev 	if (amdgpu_virtual_display) {
1307*b843c749SSergey Zigachev 		struct drm_device *ddev = adev->ddev;
1308*b843c749SSergey Zigachev 		const char *pci_address_name = pci_name(ddev->pdev);
1309*b843c749SSergey Zigachev 		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1310*b843c749SSergey Zigachev 
1311*b843c749SSergey Zigachev 		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1312*b843c749SSergey Zigachev 		pciaddstr_tmp = pciaddstr;
1313*b843c749SSergey Zigachev 		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1314*b843c749SSergey Zigachev 			pciaddname = strsep(&pciaddname_tmp, ",");
1315*b843c749SSergey Zigachev 			if (!strcmp("all", pciaddname)
1316*b843c749SSergey Zigachev 			    || !strcmp(pci_address_name, pciaddname)) {
1317*b843c749SSergey Zigachev 				long num_crtc;
1318*b843c749SSergey Zigachev 				int res = -1;
1319*b843c749SSergey Zigachev 
1320*b843c749SSergey Zigachev 				adev->enable_virtual_display = true;
1321*b843c749SSergey Zigachev 
1322*b843c749SSergey Zigachev 				if (pciaddname_tmp)
1323*b843c749SSergey Zigachev 					res = kstrtol(pciaddname_tmp, 10,
1324*b843c749SSergey Zigachev 						      &num_crtc);
1325*b843c749SSergey Zigachev 
1326*b843c749SSergey Zigachev 				if (!res) {
1327*b843c749SSergey Zigachev 					if (num_crtc < 1)
1328*b843c749SSergey Zigachev 						num_crtc = 1;
1329*b843c749SSergey Zigachev 					if (num_crtc > 6)
1330*b843c749SSergey Zigachev 						num_crtc = 6;
1331*b843c749SSergey Zigachev 					adev->mode_info.num_crtc = num_crtc;
1332*b843c749SSergey Zigachev 				} else {
1333*b843c749SSergey Zigachev 					adev->mode_info.num_crtc = 1;
1334*b843c749SSergey Zigachev 				}
1335*b843c749SSergey Zigachev 				break;
1336*b843c749SSergey Zigachev 			}
1337*b843c749SSergey Zigachev 		}
1338*b843c749SSergey Zigachev 
1339*b843c749SSergey Zigachev 		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1340*b843c749SSergey Zigachev 			 amdgpu_virtual_display, pci_address_name,
1341*b843c749SSergey Zigachev 			 adev->enable_virtual_display, adev->mode_info.num_crtc);
1342*b843c749SSergey Zigachev 
1343*b843c749SSergey Zigachev 		kfree(pciaddstr);
1344*b843c749SSergey Zigachev 	}
1345*b843c749SSergey Zigachev }
1346*b843c749SSergey Zigachev 
1347*b843c749SSergey Zigachev /**
1348*b843c749SSergey Zigachev  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1349*b843c749SSergey Zigachev  *
1350*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1351*b843c749SSergey Zigachev  *
1352*b843c749SSergey Zigachev  * Parses the asic configuration parameters specified in the gpu info
1353*b843c749SSergey Zigachev  * firmware and makes them availale to the driver for use in configuring
1354*b843c749SSergey Zigachev  * the asic.
1355*b843c749SSergey Zigachev  * Returns 0 on success, -EINVAL on failure.
1356*b843c749SSergey Zigachev  */
1357*b843c749SSergey Zigachev static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1358*b843c749SSergey Zigachev {
1359*b843c749SSergey Zigachev 	const char *chip_name;
1360*b843c749SSergey Zigachev 	char fw_name[30];
1361*b843c749SSergey Zigachev 	int err;
1362*b843c749SSergey Zigachev 	const struct gpu_info_firmware_header_v1_0 *hdr;
1363*b843c749SSergey Zigachev 
1364*b843c749SSergey Zigachev 	adev->firmware.gpu_info_fw = NULL;
1365*b843c749SSergey Zigachev 
1366*b843c749SSergey Zigachev 	switch (adev->asic_type) {
1367*b843c749SSergey Zigachev 	case CHIP_TOPAZ:
1368*b843c749SSergey Zigachev 	case CHIP_TONGA:
1369*b843c749SSergey Zigachev 	case CHIP_FIJI:
1370*b843c749SSergey Zigachev 	case CHIP_POLARIS10:
1371*b843c749SSergey Zigachev 	case CHIP_POLARIS11:
1372*b843c749SSergey Zigachev 	case CHIP_POLARIS12:
1373*b843c749SSergey Zigachev 	case CHIP_VEGAM:
1374*b843c749SSergey Zigachev 	case CHIP_CARRIZO:
1375*b843c749SSergey Zigachev 	case CHIP_STONEY:
1376*b843c749SSergey Zigachev #ifdef CONFIG_DRM_AMDGPU_SI
1377*b843c749SSergey Zigachev 	case CHIP_VERDE:
1378*b843c749SSergey Zigachev 	case CHIP_TAHITI:
1379*b843c749SSergey Zigachev 	case CHIP_PITCAIRN:
1380*b843c749SSergey Zigachev 	case CHIP_OLAND:
1381*b843c749SSergey Zigachev 	case CHIP_HAINAN:
1382*b843c749SSergey Zigachev #endif
1383*b843c749SSergey Zigachev #ifdef CONFIG_DRM_AMDGPU_CIK
1384*b843c749SSergey Zigachev 	case CHIP_BONAIRE:
1385*b843c749SSergey Zigachev 	case CHIP_HAWAII:
1386*b843c749SSergey Zigachev 	case CHIP_KAVERI:
1387*b843c749SSergey Zigachev 	case CHIP_KABINI:
1388*b843c749SSergey Zigachev 	case CHIP_MULLINS:
1389*b843c749SSergey Zigachev #endif
1390*b843c749SSergey Zigachev 	case CHIP_VEGA20:
1391*b843c749SSergey Zigachev 	default:
1392*b843c749SSergey Zigachev 		return 0;
1393*b843c749SSergey Zigachev 	case CHIP_VEGA10:
1394*b843c749SSergey Zigachev 		chip_name = "vega10";
1395*b843c749SSergey Zigachev 		break;
1396*b843c749SSergey Zigachev 	case CHIP_VEGA12:
1397*b843c749SSergey Zigachev 		chip_name = "vega12";
1398*b843c749SSergey Zigachev 		break;
1399*b843c749SSergey Zigachev 	case CHIP_RAVEN:
1400*b843c749SSergey Zigachev 		chip_name = "raven";
1401*b843c749SSergey Zigachev 		break;
1402*b843c749SSergey Zigachev 	}
1403*b843c749SSergey Zigachev 
1404*b843c749SSergey Zigachev 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1405*b843c749SSergey Zigachev 	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1406*b843c749SSergey Zigachev 	if (err) {
1407*b843c749SSergey Zigachev 		dev_err(adev->dev,
1408*b843c749SSergey Zigachev 			"Failed to load gpu_info firmware \"%s\"\n",
1409*b843c749SSergey Zigachev 			fw_name);
1410*b843c749SSergey Zigachev 		goto out;
1411*b843c749SSergey Zigachev 	}
1412*b843c749SSergey Zigachev 	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1413*b843c749SSergey Zigachev 	if (err) {
1414*b843c749SSergey Zigachev 		dev_err(adev->dev,
1415*b843c749SSergey Zigachev 			"Failed to validate gpu_info firmware \"%s\"\n",
1416*b843c749SSergey Zigachev 			fw_name);
1417*b843c749SSergey Zigachev 		goto out;
1418*b843c749SSergey Zigachev 	}
1419*b843c749SSergey Zigachev 
1420*b843c749SSergey Zigachev 	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1421*b843c749SSergey Zigachev 	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1422*b843c749SSergey Zigachev 
1423*b843c749SSergey Zigachev 	switch (hdr->version_major) {
1424*b843c749SSergey Zigachev 	case 1:
1425*b843c749SSergey Zigachev 	{
1426*b843c749SSergey Zigachev 		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1427*b843c749SSergey Zigachev 			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1428*b843c749SSergey Zigachev 								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1429*b843c749SSergey Zigachev 
1430*b843c749SSergey Zigachev 		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1431*b843c749SSergey Zigachev 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1432*b843c749SSergey Zigachev 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1433*b843c749SSergey Zigachev 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1434*b843c749SSergey Zigachev 		adev->gfx.config.max_texture_channel_caches =
1435*b843c749SSergey Zigachev 			le32_to_cpu(gpu_info_fw->gc_num_tccs);
1436*b843c749SSergey Zigachev 		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1437*b843c749SSergey Zigachev 		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1438*b843c749SSergey Zigachev 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1439*b843c749SSergey Zigachev 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1440*b843c749SSergey Zigachev 		adev->gfx.config.double_offchip_lds_buf =
1441*b843c749SSergey Zigachev 			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1442*b843c749SSergey Zigachev 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1443*b843c749SSergey Zigachev 		adev->gfx.cu_info.max_waves_per_simd =
1444*b843c749SSergey Zigachev 			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1445*b843c749SSergey Zigachev 		adev->gfx.cu_info.max_scratch_slots_per_cu =
1446*b843c749SSergey Zigachev 			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1447*b843c749SSergey Zigachev 		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1448*b843c749SSergey Zigachev 		break;
1449*b843c749SSergey Zigachev 	}
1450*b843c749SSergey Zigachev 	default:
1451*b843c749SSergey Zigachev 		dev_err(adev->dev,
1452*b843c749SSergey Zigachev 			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1453*b843c749SSergey Zigachev 		err = -EINVAL;
1454*b843c749SSergey Zigachev 		goto out;
1455*b843c749SSergey Zigachev 	}
1456*b843c749SSergey Zigachev out:
1457*b843c749SSergey Zigachev 	return err;
1458*b843c749SSergey Zigachev }
1459*b843c749SSergey Zigachev 
1460*b843c749SSergey Zigachev /**
1461*b843c749SSergey Zigachev  * amdgpu_device_ip_early_init - run early init for hardware IPs
1462*b843c749SSergey Zigachev  *
1463*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1464*b843c749SSergey Zigachev  *
1465*b843c749SSergey Zigachev  * Early initialization pass for hardware IPs.  The hardware IPs that make
1466*b843c749SSergey Zigachev  * up each asic are discovered each IP's early_init callback is run.  This
1467*b843c749SSergey Zigachev  * is the first stage in initializing the asic.
1468*b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
1469*b843c749SSergey Zigachev  */
1470*b843c749SSergey Zigachev static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
1471*b843c749SSergey Zigachev {
1472*b843c749SSergey Zigachev 	int i, r;
1473*b843c749SSergey Zigachev 
1474*b843c749SSergey Zigachev 	amdgpu_device_enable_virtual_display(adev);
1475*b843c749SSergey Zigachev 
1476*b843c749SSergey Zigachev 	switch (adev->asic_type) {
1477*b843c749SSergey Zigachev 	case CHIP_TOPAZ:
1478*b843c749SSergey Zigachev 	case CHIP_TONGA:
1479*b843c749SSergey Zigachev 	case CHIP_FIJI:
1480*b843c749SSergey Zigachev 	case CHIP_POLARIS10:
1481*b843c749SSergey Zigachev 	case CHIP_POLARIS11:
1482*b843c749SSergey Zigachev 	case CHIP_POLARIS12:
1483*b843c749SSergey Zigachev 	case CHIP_VEGAM:
1484*b843c749SSergey Zigachev 	case CHIP_CARRIZO:
1485*b843c749SSergey Zigachev 	case CHIP_STONEY:
1486*b843c749SSergey Zigachev 		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
1487*b843c749SSergey Zigachev 			adev->family = AMDGPU_FAMILY_CZ;
1488*b843c749SSergey Zigachev 		else
1489*b843c749SSergey Zigachev 			adev->family = AMDGPU_FAMILY_VI;
1490*b843c749SSergey Zigachev 
1491*b843c749SSergey Zigachev 		r = vi_set_ip_blocks(adev);
1492*b843c749SSergey Zigachev 		if (r)
1493*b843c749SSergey Zigachev 			return r;
1494*b843c749SSergey Zigachev 		break;
1495*b843c749SSergey Zigachev #ifdef CONFIG_DRM_AMDGPU_SI
1496*b843c749SSergey Zigachev 	case CHIP_VERDE:
1497*b843c749SSergey Zigachev 	case CHIP_TAHITI:
1498*b843c749SSergey Zigachev 	case CHIP_PITCAIRN:
1499*b843c749SSergey Zigachev 	case CHIP_OLAND:
1500*b843c749SSergey Zigachev 	case CHIP_HAINAN:
1501*b843c749SSergey Zigachev 		adev->family = AMDGPU_FAMILY_SI;
1502*b843c749SSergey Zigachev 		r = si_set_ip_blocks(adev);
1503*b843c749SSergey Zigachev 		if (r)
1504*b843c749SSergey Zigachev 			return r;
1505*b843c749SSergey Zigachev 		break;
1506*b843c749SSergey Zigachev #endif
1507*b843c749SSergey Zigachev #ifdef CONFIG_DRM_AMDGPU_CIK
1508*b843c749SSergey Zigachev 	case CHIP_BONAIRE:
1509*b843c749SSergey Zigachev 	case CHIP_HAWAII:
1510*b843c749SSergey Zigachev 	case CHIP_KAVERI:
1511*b843c749SSergey Zigachev 	case CHIP_KABINI:
1512*b843c749SSergey Zigachev 	case CHIP_MULLINS:
1513*b843c749SSergey Zigachev 		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1514*b843c749SSergey Zigachev 			adev->family = AMDGPU_FAMILY_CI;
1515*b843c749SSergey Zigachev 		else
1516*b843c749SSergey Zigachev 			adev->family = AMDGPU_FAMILY_KV;
1517*b843c749SSergey Zigachev 
1518*b843c749SSergey Zigachev 		r = cik_set_ip_blocks(adev);
1519*b843c749SSergey Zigachev 		if (r)
1520*b843c749SSergey Zigachev 			return r;
1521*b843c749SSergey Zigachev 		break;
1522*b843c749SSergey Zigachev #endif
1523*b843c749SSergey Zigachev 	case CHIP_VEGA10:
1524*b843c749SSergey Zigachev 	case CHIP_VEGA12:
1525*b843c749SSergey Zigachev 	case CHIP_VEGA20:
1526*b843c749SSergey Zigachev 	case CHIP_RAVEN:
1527*b843c749SSergey Zigachev 		if (adev->asic_type == CHIP_RAVEN)
1528*b843c749SSergey Zigachev 			adev->family = AMDGPU_FAMILY_RV;
1529*b843c749SSergey Zigachev 		else
1530*b843c749SSergey Zigachev 			adev->family = AMDGPU_FAMILY_AI;
1531*b843c749SSergey Zigachev 
1532*b843c749SSergey Zigachev 		r = soc15_set_ip_blocks(adev);
1533*b843c749SSergey Zigachev 		if (r)
1534*b843c749SSergey Zigachev 			return r;
1535*b843c749SSergey Zigachev 		break;
1536*b843c749SSergey Zigachev 	default:
1537*b843c749SSergey Zigachev 		/* FIXME: not supported yet */
1538*b843c749SSergey Zigachev 		return -EINVAL;
1539*b843c749SSergey Zigachev 	}
1540*b843c749SSergey Zigachev 
1541*b843c749SSergey Zigachev 	r = amdgpu_device_parse_gpu_info_fw(adev);
1542*b843c749SSergey Zigachev 	if (r)
1543*b843c749SSergey Zigachev 		return r;
1544*b843c749SSergey Zigachev 
1545*b843c749SSergey Zigachev 	amdgpu_amdkfd_device_probe(adev);
1546*b843c749SSergey Zigachev 
1547*b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev)) {
1548*b843c749SSergey Zigachev 		r = amdgpu_virt_request_full_gpu(adev, true);
1549*b843c749SSergey Zigachev 		if (r)
1550*b843c749SSergey Zigachev 			return -EAGAIN;
1551*b843c749SSergey Zigachev 	}
1552*b843c749SSergey Zigachev 
1553*b843c749SSergey Zigachev 	adev->powerplay.pp_feature = amdgpu_pp_feature_mask;
1554*b843c749SSergey Zigachev 
1555*b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
1556*b843c749SSergey Zigachev 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1557*b843c749SSergey Zigachev 			DRM_ERROR("disabled ip block: %d <%s>\n",
1558*b843c749SSergey Zigachev 				  i, adev->ip_blocks[i].version->funcs->name);
1559*b843c749SSergey Zigachev 			adev->ip_blocks[i].status.valid = false;
1560*b843c749SSergey Zigachev 		} else {
1561*b843c749SSergey Zigachev 			if (adev->ip_blocks[i].version->funcs->early_init) {
1562*b843c749SSergey Zigachev 				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
1563*b843c749SSergey Zigachev 				if (r == -ENOENT) {
1564*b843c749SSergey Zigachev 					adev->ip_blocks[i].status.valid = false;
1565*b843c749SSergey Zigachev 				} else if (r) {
1566*b843c749SSergey Zigachev 					DRM_ERROR("early_init of IP block <%s> failed %d\n",
1567*b843c749SSergey Zigachev 						  adev->ip_blocks[i].version->funcs->name, r);
1568*b843c749SSergey Zigachev 					return r;
1569*b843c749SSergey Zigachev 				} else {
1570*b843c749SSergey Zigachev 					adev->ip_blocks[i].status.valid = true;
1571*b843c749SSergey Zigachev 				}
1572*b843c749SSergey Zigachev 			} else {
1573*b843c749SSergey Zigachev 				adev->ip_blocks[i].status.valid = true;
1574*b843c749SSergey Zigachev 			}
1575*b843c749SSergey Zigachev 		}
1576*b843c749SSergey Zigachev 	}
1577*b843c749SSergey Zigachev 
1578*b843c749SSergey Zigachev 	adev->cg_flags &= amdgpu_cg_mask;
1579*b843c749SSergey Zigachev 	adev->pg_flags &= amdgpu_pg_mask;
1580*b843c749SSergey Zigachev 
1581*b843c749SSergey Zigachev 	return 0;
1582*b843c749SSergey Zigachev }
1583*b843c749SSergey Zigachev 
1584*b843c749SSergey Zigachev /**
1585*b843c749SSergey Zigachev  * amdgpu_device_ip_init - run init for hardware IPs
1586*b843c749SSergey Zigachev  *
1587*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1588*b843c749SSergey Zigachev  *
1589*b843c749SSergey Zigachev  * Main initialization pass for hardware IPs.  The list of all the hardware
1590*b843c749SSergey Zigachev  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
1591*b843c749SSergey Zigachev  * are run.  sw_init initializes the software state associated with each IP
1592*b843c749SSergey Zigachev  * and hw_init initializes the hardware associated with each IP.
1593*b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
1594*b843c749SSergey Zigachev  */
1595*b843c749SSergey Zigachev static int amdgpu_device_ip_init(struct amdgpu_device *adev)
1596*b843c749SSergey Zigachev {
1597*b843c749SSergey Zigachev 	int i, r;
1598*b843c749SSergey Zigachev 
1599*b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
1600*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
1601*b843c749SSergey Zigachev 			continue;
1602*b843c749SSergey Zigachev 		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
1603*b843c749SSergey Zigachev 		if (r) {
1604*b843c749SSergey Zigachev 			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1605*b843c749SSergey Zigachev 				  adev->ip_blocks[i].version->funcs->name, r);
1606*b843c749SSergey Zigachev 			return r;
1607*b843c749SSergey Zigachev 		}
1608*b843c749SSergey Zigachev 		adev->ip_blocks[i].status.sw = true;
1609*b843c749SSergey Zigachev 
1610*b843c749SSergey Zigachev 		/* need to do gmc hw init early so we can allocate gpu mem */
1611*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1612*b843c749SSergey Zigachev 			r = amdgpu_device_vram_scratch_init(adev);
1613*b843c749SSergey Zigachev 			if (r) {
1614*b843c749SSergey Zigachev 				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
1615*b843c749SSergey Zigachev 				return r;
1616*b843c749SSergey Zigachev 			}
1617*b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1618*b843c749SSergey Zigachev 			if (r) {
1619*b843c749SSergey Zigachev 				DRM_ERROR("hw_init %d failed %d\n", i, r);
1620*b843c749SSergey Zigachev 				return r;
1621*b843c749SSergey Zigachev 			}
1622*b843c749SSergey Zigachev 			r = amdgpu_device_wb_init(adev);
1623*b843c749SSergey Zigachev 			if (r) {
1624*b843c749SSergey Zigachev 				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
1625*b843c749SSergey Zigachev 				return r;
1626*b843c749SSergey Zigachev 			}
1627*b843c749SSergey Zigachev 			adev->ip_blocks[i].status.hw = true;
1628*b843c749SSergey Zigachev 
1629*b843c749SSergey Zigachev 			/* right after GMC hw init, we create CSA */
1630*b843c749SSergey Zigachev 			if (amdgpu_sriov_vf(adev)) {
1631*b843c749SSergey Zigachev 				r = amdgpu_allocate_static_csa(adev);
1632*b843c749SSergey Zigachev 				if (r) {
1633*b843c749SSergey Zigachev 					DRM_ERROR("allocate CSA failed %d\n", r);
1634*b843c749SSergey Zigachev 					return r;
1635*b843c749SSergey Zigachev 				}
1636*b843c749SSergey Zigachev 			}
1637*b843c749SSergey Zigachev 		}
1638*b843c749SSergey Zigachev 	}
1639*b843c749SSergey Zigachev 
1640*b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
1641*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.sw)
1642*b843c749SSergey Zigachev 			continue;
1643*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].status.hw)
1644*b843c749SSergey Zigachev 			continue;
1645*b843c749SSergey Zigachev 		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1646*b843c749SSergey Zigachev 		if (r) {
1647*b843c749SSergey Zigachev 			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1648*b843c749SSergey Zigachev 				  adev->ip_blocks[i].version->funcs->name, r);
1649*b843c749SSergey Zigachev 			return r;
1650*b843c749SSergey Zigachev 		}
1651*b843c749SSergey Zigachev 		adev->ip_blocks[i].status.hw = true;
1652*b843c749SSergey Zigachev 	}
1653*b843c749SSergey Zigachev 
1654*b843c749SSergey Zigachev 	amdgpu_amdkfd_device_init(adev);
1655*b843c749SSergey Zigachev 
1656*b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev)) {
1657*b843c749SSergey Zigachev 		amdgpu_virt_init_data_exchange(adev);
1658*b843c749SSergey Zigachev 		amdgpu_virt_release_full_gpu(adev, true);
1659*b843c749SSergey Zigachev 	}
1660*b843c749SSergey Zigachev 
1661*b843c749SSergey Zigachev 	return 0;
1662*b843c749SSergey Zigachev }
1663*b843c749SSergey Zigachev 
1664*b843c749SSergey Zigachev /**
1665*b843c749SSergey Zigachev  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
1666*b843c749SSergey Zigachev  *
1667*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1668*b843c749SSergey Zigachev  *
1669*b843c749SSergey Zigachev  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
1670*b843c749SSergey Zigachev  * this function before a GPU reset.  If the value is retained after a
1671*b843c749SSergey Zigachev  * GPU reset, VRAM has not been lost.  Some GPU resets may destry VRAM contents.
1672*b843c749SSergey Zigachev  */
1673*b843c749SSergey Zigachev static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
1674*b843c749SSergey Zigachev {
1675*b843c749SSergey Zigachev 	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1676*b843c749SSergey Zigachev }
1677*b843c749SSergey Zigachev 
1678*b843c749SSergey Zigachev /**
1679*b843c749SSergey Zigachev  * amdgpu_device_check_vram_lost - check if vram is valid
1680*b843c749SSergey Zigachev  *
1681*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1682*b843c749SSergey Zigachev  *
1683*b843c749SSergey Zigachev  * Checks the reset magic value written to the gart pointer in VRAM.
1684*b843c749SSergey Zigachev  * The driver calls this after a GPU reset to see if the contents of
1685*b843c749SSergey Zigachev  * VRAM is lost or now.
1686*b843c749SSergey Zigachev  * returns true if vram is lost, false if not.
1687*b843c749SSergey Zigachev  */
1688*b843c749SSergey Zigachev static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
1689*b843c749SSergey Zigachev {
1690*b843c749SSergey Zigachev 	return !!memcmp(adev->gart.ptr, adev->reset_magic,
1691*b843c749SSergey Zigachev 			AMDGPU_RESET_MAGIC_NUM);
1692*b843c749SSergey Zigachev }
1693*b843c749SSergey Zigachev 
1694*b843c749SSergey Zigachev /**
1695*b843c749SSergey Zigachev  * amdgpu_device_ip_late_set_cg_state - late init for clockgating
1696*b843c749SSergey Zigachev  *
1697*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1698*b843c749SSergey Zigachev  *
1699*b843c749SSergey Zigachev  * Late initialization pass enabling clockgating for hardware IPs.
1700*b843c749SSergey Zigachev  * The list of all the hardware IPs that make up the asic is walked and the
1701*b843c749SSergey Zigachev  * set_clockgating_state callbacks are run.  This stage is run late
1702*b843c749SSergey Zigachev  * in the init process.
1703*b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
1704*b843c749SSergey Zigachev  */
1705*b843c749SSergey Zigachev static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
1706*b843c749SSergey Zigachev {
1707*b843c749SSergey Zigachev 	int i = 0, r;
1708*b843c749SSergey Zigachev 
1709*b843c749SSergey Zigachev 	if (amdgpu_emu_mode == 1)
1710*b843c749SSergey Zigachev 		return 0;
1711*b843c749SSergey Zigachev 
1712*b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
1713*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
1714*b843c749SSergey Zigachev 			continue;
1715*b843c749SSergey Zigachev 		/* skip CG for VCE/UVD, it's handled specially */
1716*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1717*b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1718*b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
1719*b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1720*b843c749SSergey Zigachev 			/* enable clockgating to save power */
1721*b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1722*b843c749SSergey Zigachev 										     AMD_CG_STATE_GATE);
1723*b843c749SSergey Zigachev 			if (r) {
1724*b843c749SSergey Zigachev 				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
1725*b843c749SSergey Zigachev 					  adev->ip_blocks[i].version->funcs->name, r);
1726*b843c749SSergey Zigachev 				return r;
1727*b843c749SSergey Zigachev 			}
1728*b843c749SSergey Zigachev 		}
1729*b843c749SSergey Zigachev 	}
1730*b843c749SSergey Zigachev 
1731*b843c749SSergey Zigachev 	return 0;
1732*b843c749SSergey Zigachev }
1733*b843c749SSergey Zigachev 
1734*b843c749SSergey Zigachev static int amdgpu_device_ip_late_set_pg_state(struct amdgpu_device *adev)
1735*b843c749SSergey Zigachev {
1736*b843c749SSergey Zigachev 	int i = 0, r;
1737*b843c749SSergey Zigachev 
1738*b843c749SSergey Zigachev 	if (amdgpu_emu_mode == 1)
1739*b843c749SSergey Zigachev 		return 0;
1740*b843c749SSergey Zigachev 
1741*b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
1742*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
1743*b843c749SSergey Zigachev 			continue;
1744*b843c749SSergey Zigachev 		/* skip CG for VCE/UVD, it's handled specially */
1745*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1746*b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1747*b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
1748*b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
1749*b843c749SSergey Zigachev 			/* enable powergating to save power */
1750*b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
1751*b843c749SSergey Zigachev 										     AMD_PG_STATE_GATE);
1752*b843c749SSergey Zigachev 			if (r) {
1753*b843c749SSergey Zigachev 				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
1754*b843c749SSergey Zigachev 					  adev->ip_blocks[i].version->funcs->name, r);
1755*b843c749SSergey Zigachev 				return r;
1756*b843c749SSergey Zigachev 			}
1757*b843c749SSergey Zigachev 		}
1758*b843c749SSergey Zigachev 	}
1759*b843c749SSergey Zigachev 	return 0;
1760*b843c749SSergey Zigachev }
1761*b843c749SSergey Zigachev 
1762*b843c749SSergey Zigachev /**
1763*b843c749SSergey Zigachev  * amdgpu_device_ip_late_init - run late init for hardware IPs
1764*b843c749SSergey Zigachev  *
1765*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1766*b843c749SSergey Zigachev  *
1767*b843c749SSergey Zigachev  * Late initialization pass for hardware IPs.  The list of all the hardware
1768*b843c749SSergey Zigachev  * IPs that make up the asic is walked and the late_init callbacks are run.
1769*b843c749SSergey Zigachev  * late_init covers any special initialization that an IP requires
1770*b843c749SSergey Zigachev  * after all of the have been initialized or something that needs to happen
1771*b843c749SSergey Zigachev  * late in the init process.
1772*b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
1773*b843c749SSergey Zigachev  */
1774*b843c749SSergey Zigachev static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
1775*b843c749SSergey Zigachev {
1776*b843c749SSergey Zigachev 	int i = 0, r;
1777*b843c749SSergey Zigachev 
1778*b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
1779*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
1780*b843c749SSergey Zigachev 			continue;
1781*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->funcs->late_init) {
1782*b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
1783*b843c749SSergey Zigachev 			if (r) {
1784*b843c749SSergey Zigachev 				DRM_ERROR("late_init of IP block <%s> failed %d\n",
1785*b843c749SSergey Zigachev 					  adev->ip_blocks[i].version->funcs->name, r);
1786*b843c749SSergey Zigachev 				return r;
1787*b843c749SSergey Zigachev 			}
1788*b843c749SSergey Zigachev 			adev->ip_blocks[i].status.late_initialized = true;
1789*b843c749SSergey Zigachev 		}
1790*b843c749SSergey Zigachev 	}
1791*b843c749SSergey Zigachev 
1792*b843c749SSergey Zigachev 	amdgpu_device_ip_late_set_cg_state(adev);
1793*b843c749SSergey Zigachev 	amdgpu_device_ip_late_set_pg_state(adev);
1794*b843c749SSergey Zigachev 
1795*b843c749SSergey Zigachev 	queue_delayed_work(system_wq, &adev->late_init_work,
1796*b843c749SSergey Zigachev 			   msecs_to_jiffies(AMDGPU_RESUME_MS));
1797*b843c749SSergey Zigachev 
1798*b843c749SSergey Zigachev 	amdgpu_device_fill_reset_magic(adev);
1799*b843c749SSergey Zigachev 
1800*b843c749SSergey Zigachev 	return 0;
1801*b843c749SSergey Zigachev }
1802*b843c749SSergey Zigachev 
1803*b843c749SSergey Zigachev /**
1804*b843c749SSergey Zigachev  * amdgpu_device_ip_fini - run fini for hardware IPs
1805*b843c749SSergey Zigachev  *
1806*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1807*b843c749SSergey Zigachev  *
1808*b843c749SSergey Zigachev  * Main teardown pass for hardware IPs.  The list of all the hardware
1809*b843c749SSergey Zigachev  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
1810*b843c749SSergey Zigachev  * are run.  hw_fini tears down the hardware associated with each IP
1811*b843c749SSergey Zigachev  * and sw_fini tears down any software state associated with each IP.
1812*b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
1813*b843c749SSergey Zigachev  */
1814*b843c749SSergey Zigachev static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
1815*b843c749SSergey Zigachev {
1816*b843c749SSergey Zigachev 	int i, r;
1817*b843c749SSergey Zigachev 
1818*b843c749SSergey Zigachev 	amdgpu_amdkfd_device_fini(adev);
1819*b843c749SSergey Zigachev 	/* need to disable SMC first */
1820*b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
1821*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.hw)
1822*b843c749SSergey Zigachev 			continue;
1823*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC &&
1824*b843c749SSergey Zigachev 			adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1825*b843c749SSergey Zigachev 			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
1826*b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1827*b843c749SSergey Zigachev 										     AMD_CG_STATE_UNGATE);
1828*b843c749SSergey Zigachev 			if (r) {
1829*b843c749SSergey Zigachev 				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1830*b843c749SSergey Zigachev 					  adev->ip_blocks[i].version->funcs->name, r);
1831*b843c749SSergey Zigachev 				return r;
1832*b843c749SSergey Zigachev 			}
1833*b843c749SSergey Zigachev 			if (adev->powerplay.pp_funcs->set_powergating_by_smu)
1834*b843c749SSergey Zigachev 				amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false);
1835*b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
1836*b843c749SSergey Zigachev 			/* XXX handle errors */
1837*b843c749SSergey Zigachev 			if (r) {
1838*b843c749SSergey Zigachev 				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1839*b843c749SSergey Zigachev 					  adev->ip_blocks[i].version->funcs->name, r);
1840*b843c749SSergey Zigachev 			}
1841*b843c749SSergey Zigachev 			adev->ip_blocks[i].status.hw = false;
1842*b843c749SSergey Zigachev 			break;
1843*b843c749SSergey Zigachev 		}
1844*b843c749SSergey Zigachev 	}
1845*b843c749SSergey Zigachev 
1846*b843c749SSergey Zigachev 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1847*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.hw)
1848*b843c749SSergey Zigachev 			continue;
1849*b843c749SSergey Zigachev 
1850*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1851*b843c749SSergey Zigachev 			adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1852*b843c749SSergey Zigachev 			adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
1853*b843c749SSergey Zigachev 			adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1854*b843c749SSergey Zigachev 			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
1855*b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1856*b843c749SSergey Zigachev 										     AMD_CG_STATE_UNGATE);
1857*b843c749SSergey Zigachev 			if (r) {
1858*b843c749SSergey Zigachev 				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1859*b843c749SSergey Zigachev 					  adev->ip_blocks[i].version->funcs->name, r);
1860*b843c749SSergey Zigachev 				return r;
1861*b843c749SSergey Zigachev 			}
1862*b843c749SSergey Zigachev 		}
1863*b843c749SSergey Zigachev 
1864*b843c749SSergey Zigachev 		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
1865*b843c749SSergey Zigachev 		/* XXX handle errors */
1866*b843c749SSergey Zigachev 		if (r) {
1867*b843c749SSergey Zigachev 			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1868*b843c749SSergey Zigachev 				  adev->ip_blocks[i].version->funcs->name, r);
1869*b843c749SSergey Zigachev 		}
1870*b843c749SSergey Zigachev 
1871*b843c749SSergey Zigachev 		adev->ip_blocks[i].status.hw = false;
1872*b843c749SSergey Zigachev 	}
1873*b843c749SSergey Zigachev 
1874*b843c749SSergey Zigachev 
1875*b843c749SSergey Zigachev 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1876*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.sw)
1877*b843c749SSergey Zigachev 			continue;
1878*b843c749SSergey Zigachev 
1879*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1880*b843c749SSergey Zigachev 			amdgpu_free_static_csa(adev);
1881*b843c749SSergey Zigachev 			amdgpu_device_wb_fini(adev);
1882*b843c749SSergey Zigachev 			amdgpu_device_vram_scratch_fini(adev);
1883*b843c749SSergey Zigachev 		}
1884*b843c749SSergey Zigachev 
1885*b843c749SSergey Zigachev 		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
1886*b843c749SSergey Zigachev 		/* XXX handle errors */
1887*b843c749SSergey Zigachev 		if (r) {
1888*b843c749SSergey Zigachev 			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1889*b843c749SSergey Zigachev 				  adev->ip_blocks[i].version->funcs->name, r);
1890*b843c749SSergey Zigachev 		}
1891*b843c749SSergey Zigachev 		adev->ip_blocks[i].status.sw = false;
1892*b843c749SSergey Zigachev 		adev->ip_blocks[i].status.valid = false;
1893*b843c749SSergey Zigachev 	}
1894*b843c749SSergey Zigachev 
1895*b843c749SSergey Zigachev 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1896*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.late_initialized)
1897*b843c749SSergey Zigachev 			continue;
1898*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->funcs->late_fini)
1899*b843c749SSergey Zigachev 			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1900*b843c749SSergey Zigachev 		adev->ip_blocks[i].status.late_initialized = false;
1901*b843c749SSergey Zigachev 	}
1902*b843c749SSergey Zigachev 
1903*b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev))
1904*b843c749SSergey Zigachev 		if (amdgpu_virt_release_full_gpu(adev, false))
1905*b843c749SSergey Zigachev 			DRM_ERROR("failed to release exclusive mode on fini\n");
1906*b843c749SSergey Zigachev 
1907*b843c749SSergey Zigachev 	return 0;
1908*b843c749SSergey Zigachev }
1909*b843c749SSergey Zigachev 
1910*b843c749SSergey Zigachev /**
1911*b843c749SSergey Zigachev  * amdgpu_device_ip_late_init_func_handler - work handler for clockgating
1912*b843c749SSergey Zigachev  *
1913*b843c749SSergey Zigachev  * @work: work_struct
1914*b843c749SSergey Zigachev  *
1915*b843c749SSergey Zigachev  * Work handler for amdgpu_device_ip_late_set_cg_state.  We put the
1916*b843c749SSergey Zigachev  * clockgating setup into a worker thread to speed up driver init and
1917*b843c749SSergey Zigachev  * resume from suspend.
1918*b843c749SSergey Zigachev  */
1919*b843c749SSergey Zigachev static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
1920*b843c749SSergey Zigachev {
1921*b843c749SSergey Zigachev 	struct amdgpu_device *adev =
1922*b843c749SSergey Zigachev 		container_of(work, struct amdgpu_device, late_init_work.work);
1923*b843c749SSergey Zigachev 	int r;
1924*b843c749SSergey Zigachev 
1925*b843c749SSergey Zigachev 	r = amdgpu_ib_ring_tests(adev);
1926*b843c749SSergey Zigachev 	if (r)
1927*b843c749SSergey Zigachev 		DRM_ERROR("ib ring test failed (%d).\n", r);
1928*b843c749SSergey Zigachev }
1929*b843c749SSergey Zigachev 
1930*b843c749SSergey Zigachev /**
1931*b843c749SSergey Zigachev  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
1932*b843c749SSergey Zigachev  *
1933*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1934*b843c749SSergey Zigachev  *
1935*b843c749SSergey Zigachev  * Main suspend function for hardware IPs.  The list of all the hardware
1936*b843c749SSergey Zigachev  * IPs that make up the asic is walked, clockgating is disabled and the
1937*b843c749SSergey Zigachev  * suspend callbacks are run.  suspend puts the hardware and software state
1938*b843c749SSergey Zigachev  * in each IP into a state suitable for suspend.
1939*b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
1940*b843c749SSergey Zigachev  */
1941*b843c749SSergey Zigachev static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
1942*b843c749SSergey Zigachev {
1943*b843c749SSergey Zigachev 	int i, r;
1944*b843c749SSergey Zigachev 
1945*b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev))
1946*b843c749SSergey Zigachev 		amdgpu_virt_request_full_gpu(adev, false);
1947*b843c749SSergey Zigachev 
1948*b843c749SSergey Zigachev 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1949*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
1950*b843c749SSergey Zigachev 			continue;
1951*b843c749SSergey Zigachev 		/* displays are handled separately */
1952*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
1953*b843c749SSergey Zigachev 			/* ungate blocks so that suspend can properly shut them down */
1954*b843c749SSergey Zigachev 			if (adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1955*b843c749SSergey Zigachev 				r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1956*b843c749SSergey Zigachev 											     AMD_CG_STATE_UNGATE);
1957*b843c749SSergey Zigachev 				if (r) {
1958*b843c749SSergey Zigachev 					DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1959*b843c749SSergey Zigachev 						  adev->ip_blocks[i].version->funcs->name, r);
1960*b843c749SSergey Zigachev 				}
1961*b843c749SSergey Zigachev 			}
1962*b843c749SSergey Zigachev 			/* XXX handle errors */
1963*b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->suspend(adev);
1964*b843c749SSergey Zigachev 			/* XXX handle errors */
1965*b843c749SSergey Zigachev 			if (r) {
1966*b843c749SSergey Zigachev 				DRM_ERROR("suspend of IP block <%s> failed %d\n",
1967*b843c749SSergey Zigachev 					  adev->ip_blocks[i].version->funcs->name, r);
1968*b843c749SSergey Zigachev 			}
1969*b843c749SSergey Zigachev 		}
1970*b843c749SSergey Zigachev 	}
1971*b843c749SSergey Zigachev 
1972*b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev))
1973*b843c749SSergey Zigachev 		amdgpu_virt_release_full_gpu(adev, false);
1974*b843c749SSergey Zigachev 
1975*b843c749SSergey Zigachev 	return 0;
1976*b843c749SSergey Zigachev }
1977*b843c749SSergey Zigachev 
1978*b843c749SSergey Zigachev /**
1979*b843c749SSergey Zigachev  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
1980*b843c749SSergey Zigachev  *
1981*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1982*b843c749SSergey Zigachev  *
1983*b843c749SSergey Zigachev  * Main suspend function for hardware IPs.  The list of all the hardware
1984*b843c749SSergey Zigachev  * IPs that make up the asic is walked, clockgating is disabled and the
1985*b843c749SSergey Zigachev  * suspend callbacks are run.  suspend puts the hardware and software state
1986*b843c749SSergey Zigachev  * in each IP into a state suitable for suspend.
1987*b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
1988*b843c749SSergey Zigachev  */
1989*b843c749SSergey Zigachev static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
1990*b843c749SSergey Zigachev {
1991*b843c749SSergey Zigachev 	int i, r;
1992*b843c749SSergey Zigachev 
1993*b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev))
1994*b843c749SSergey Zigachev 		amdgpu_virt_request_full_gpu(adev, false);
1995*b843c749SSergey Zigachev 
1996*b843c749SSergey Zigachev 	/* ungate SMC block first */
1997*b843c749SSergey Zigachev 	r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1998*b843c749SSergey Zigachev 						   AMD_CG_STATE_UNGATE);
1999*b843c749SSergey Zigachev 	if (r) {
2000*b843c749SSergey Zigachev 		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
2001*b843c749SSergey Zigachev 	}
2002*b843c749SSergey Zigachev 
2003*b843c749SSergey Zigachev 	/* call smu to disable gfx off feature first when suspend */
2004*b843c749SSergey Zigachev 	if (adev->powerplay.pp_funcs->set_powergating_by_smu)
2005*b843c749SSergey Zigachev 		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false);
2006*b843c749SSergey Zigachev 
2007*b843c749SSergey Zigachev 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2008*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
2009*b843c749SSergey Zigachev 			continue;
2010*b843c749SSergey Zigachev 		/* displays are handled in phase1 */
2011*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2012*b843c749SSergey Zigachev 			continue;
2013*b843c749SSergey Zigachev 		/* ungate blocks so that suspend can properly shut them down */
2014*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
2015*b843c749SSergey Zigachev 			adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2016*b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2017*b843c749SSergey Zigachev 										     AMD_CG_STATE_UNGATE);
2018*b843c749SSergey Zigachev 			if (r) {
2019*b843c749SSergey Zigachev 				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
2020*b843c749SSergey Zigachev 					  adev->ip_blocks[i].version->funcs->name, r);
2021*b843c749SSergey Zigachev 			}
2022*b843c749SSergey Zigachev 		}
2023*b843c749SSergey Zigachev 		/* XXX handle errors */
2024*b843c749SSergey Zigachev 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
2025*b843c749SSergey Zigachev 		/* XXX handle errors */
2026*b843c749SSergey Zigachev 		if (r) {
2027*b843c749SSergey Zigachev 			DRM_ERROR("suspend of IP block <%s> failed %d\n",
2028*b843c749SSergey Zigachev 				  adev->ip_blocks[i].version->funcs->name, r);
2029*b843c749SSergey Zigachev 		}
2030*b843c749SSergey Zigachev 	}
2031*b843c749SSergey Zigachev 
2032*b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev))
2033*b843c749SSergey Zigachev 		amdgpu_virt_release_full_gpu(adev, false);
2034*b843c749SSergey Zigachev 
2035*b843c749SSergey Zigachev 	return 0;
2036*b843c749SSergey Zigachev }
2037*b843c749SSergey Zigachev 
2038*b843c749SSergey Zigachev /**
2039*b843c749SSergey Zigachev  * amdgpu_device_ip_suspend - run suspend for hardware IPs
2040*b843c749SSergey Zigachev  *
2041*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
2042*b843c749SSergey Zigachev  *
2043*b843c749SSergey Zigachev  * Main suspend function for hardware IPs.  The list of all the hardware
2044*b843c749SSergey Zigachev  * IPs that make up the asic is walked, clockgating is disabled and the
2045*b843c749SSergey Zigachev  * suspend callbacks are run.  suspend puts the hardware and software state
2046*b843c749SSergey Zigachev  * in each IP into a state suitable for suspend.
2047*b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
2048*b843c749SSergey Zigachev  */
2049*b843c749SSergey Zigachev int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2050*b843c749SSergey Zigachev {
2051*b843c749SSergey Zigachev 	int r;
2052*b843c749SSergey Zigachev 
2053*b843c749SSergey Zigachev 	r = amdgpu_device_ip_suspend_phase1(adev);
2054*b843c749SSergey Zigachev 	if (r)
2055*b843c749SSergey Zigachev 		return r;
2056*b843c749SSergey Zigachev 	r = amdgpu_device_ip_suspend_phase2(adev);
2057*b843c749SSergey Zigachev 
2058*b843c749SSergey Zigachev 	return r;
2059*b843c749SSergey Zigachev }
2060*b843c749SSergey Zigachev 
2061*b843c749SSergey Zigachev static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2062*b843c749SSergey Zigachev {
2063*b843c749SSergey Zigachev 	int i, r;
2064*b843c749SSergey Zigachev 
2065*b843c749SSergey Zigachev 	static enum amd_ip_block_type ip_order[] = {
2066*b843c749SSergey Zigachev 		AMD_IP_BLOCK_TYPE_GMC,
2067*b843c749SSergey Zigachev 		AMD_IP_BLOCK_TYPE_COMMON,
2068*b843c749SSergey Zigachev 		AMD_IP_BLOCK_TYPE_PSP,
2069*b843c749SSergey Zigachev 		AMD_IP_BLOCK_TYPE_IH,
2070*b843c749SSergey Zigachev 	};
2071*b843c749SSergey Zigachev 
2072*b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
2073*b843c749SSergey Zigachev 		int j;
2074*b843c749SSergey Zigachev 		struct amdgpu_ip_block *block;
2075*b843c749SSergey Zigachev 
2076*b843c749SSergey Zigachev 		for (j = 0; j < adev->num_ip_blocks; j++) {
2077*b843c749SSergey Zigachev 			block = &adev->ip_blocks[j];
2078*b843c749SSergey Zigachev 
2079*b843c749SSergey Zigachev 			if (block->version->type != ip_order[i] ||
2080*b843c749SSergey Zigachev 				!block->status.valid)
2081*b843c749SSergey Zigachev 				continue;
2082*b843c749SSergey Zigachev 
2083*b843c749SSergey Zigachev 			r = block->version->funcs->hw_init(adev);
2084*b843c749SSergey Zigachev 			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
2085*b843c749SSergey Zigachev 			if (r)
2086*b843c749SSergey Zigachev 				return r;
2087*b843c749SSergey Zigachev 		}
2088*b843c749SSergey Zigachev 	}
2089*b843c749SSergey Zigachev 
2090*b843c749SSergey Zigachev 	return 0;
2091*b843c749SSergey Zigachev }
2092*b843c749SSergey Zigachev 
2093*b843c749SSergey Zigachev static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2094*b843c749SSergey Zigachev {
2095*b843c749SSergey Zigachev 	int i, r;
2096*b843c749SSergey Zigachev 
2097*b843c749SSergey Zigachev 	static enum amd_ip_block_type ip_order[] = {
2098*b843c749SSergey Zigachev 		AMD_IP_BLOCK_TYPE_SMC,
2099*b843c749SSergey Zigachev 		AMD_IP_BLOCK_TYPE_DCE,
2100*b843c749SSergey Zigachev 		AMD_IP_BLOCK_TYPE_GFX,
2101*b843c749SSergey Zigachev 		AMD_IP_BLOCK_TYPE_SDMA,
2102*b843c749SSergey Zigachev 		AMD_IP_BLOCK_TYPE_UVD,
2103*b843c749SSergey Zigachev 		AMD_IP_BLOCK_TYPE_VCE
2104*b843c749SSergey Zigachev 	};
2105*b843c749SSergey Zigachev 
2106*b843c749SSergey Zigachev 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2107*b843c749SSergey Zigachev 		int j;
2108*b843c749SSergey Zigachev 		struct amdgpu_ip_block *block;
2109*b843c749SSergey Zigachev 
2110*b843c749SSergey Zigachev 		for (j = 0; j < adev->num_ip_blocks; j++) {
2111*b843c749SSergey Zigachev 			block = &adev->ip_blocks[j];
2112*b843c749SSergey Zigachev 
2113*b843c749SSergey Zigachev 			if (block->version->type != ip_order[i] ||
2114*b843c749SSergey Zigachev 				!block->status.valid)
2115*b843c749SSergey Zigachev 				continue;
2116*b843c749SSergey Zigachev 
2117*b843c749SSergey Zigachev 			r = block->version->funcs->hw_init(adev);
2118*b843c749SSergey Zigachev 			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
2119*b843c749SSergey Zigachev 			if (r)
2120*b843c749SSergey Zigachev 				return r;
2121*b843c749SSergey Zigachev 		}
2122*b843c749SSergey Zigachev 	}
2123*b843c749SSergey Zigachev 
2124*b843c749SSergey Zigachev 	return 0;
2125*b843c749SSergey Zigachev }
2126*b843c749SSergey Zigachev 
2127*b843c749SSergey Zigachev /**
2128*b843c749SSergey Zigachev  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
2129*b843c749SSergey Zigachev  *
2130*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
2131*b843c749SSergey Zigachev  *
2132*b843c749SSergey Zigachev  * First resume function for hardware IPs.  The list of all the hardware
2133*b843c749SSergey Zigachev  * IPs that make up the asic is walked and the resume callbacks are run for
2134*b843c749SSergey Zigachev  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
2135*b843c749SSergey Zigachev  * after a suspend and updates the software state as necessary.  This
2136*b843c749SSergey Zigachev  * function is also used for restoring the GPU after a GPU reset.
2137*b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
2138*b843c749SSergey Zigachev  */
2139*b843c749SSergey Zigachev static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
2140*b843c749SSergey Zigachev {
2141*b843c749SSergey Zigachev 	int i, r;
2142*b843c749SSergey Zigachev 
2143*b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
2144*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
2145*b843c749SSergey Zigachev 			continue;
2146*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2147*b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2148*b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2149*b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->resume(adev);
2150*b843c749SSergey Zigachev 			if (r) {
2151*b843c749SSergey Zigachev 				DRM_ERROR("resume of IP block <%s> failed %d\n",
2152*b843c749SSergey Zigachev 					  adev->ip_blocks[i].version->funcs->name, r);
2153*b843c749SSergey Zigachev 				return r;
2154*b843c749SSergey Zigachev 			}
2155*b843c749SSergey Zigachev 		}
2156*b843c749SSergey Zigachev 	}
2157*b843c749SSergey Zigachev 
2158*b843c749SSergey Zigachev 	return 0;
2159*b843c749SSergey Zigachev }
2160*b843c749SSergey Zigachev 
2161*b843c749SSergey Zigachev /**
2162*b843c749SSergey Zigachev  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
2163*b843c749SSergey Zigachev  *
2164*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
2165*b843c749SSergey Zigachev  *
2166*b843c749SSergey Zigachev  * First resume function for hardware IPs.  The list of all the hardware
2167*b843c749SSergey Zigachev  * IPs that make up the asic is walked and the resume callbacks are run for
2168*b843c749SSergey Zigachev  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
2169*b843c749SSergey Zigachev  * functional state after a suspend and updates the software state as
2170*b843c749SSergey Zigachev  * necessary.  This function is also used for restoring the GPU after a GPU
2171*b843c749SSergey Zigachev  * reset.
2172*b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
2173*b843c749SSergey Zigachev  */
2174*b843c749SSergey Zigachev static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
2175*b843c749SSergey Zigachev {
2176*b843c749SSergey Zigachev 	int i, r;
2177*b843c749SSergey Zigachev 
2178*b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
2179*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
2180*b843c749SSergey Zigachev 			continue;
2181*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2182*b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2183*b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
2184*b843c749SSergey Zigachev 			continue;
2185*b843c749SSergey Zigachev 		r = adev->ip_blocks[i].version->funcs->resume(adev);
2186*b843c749SSergey Zigachev 		if (r) {
2187*b843c749SSergey Zigachev 			DRM_ERROR("resume of IP block <%s> failed %d\n",
2188*b843c749SSergey Zigachev 				  adev->ip_blocks[i].version->funcs->name, r);
2189*b843c749SSergey Zigachev 			return r;
2190*b843c749SSergey Zigachev 		}
2191*b843c749SSergey Zigachev 	}
2192*b843c749SSergey Zigachev 
2193*b843c749SSergey Zigachev 	return 0;
2194*b843c749SSergey Zigachev }
2195*b843c749SSergey Zigachev 
2196*b843c749SSergey Zigachev /**
2197*b843c749SSergey Zigachev  * amdgpu_device_ip_resume - run resume for hardware IPs
2198*b843c749SSergey Zigachev  *
2199*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
2200*b843c749SSergey Zigachev  *
2201*b843c749SSergey Zigachev  * Main resume function for hardware IPs.  The hardware IPs
2202*b843c749SSergey Zigachev  * are split into two resume functions because they are
2203*b843c749SSergey Zigachev  * are also used in in recovering from a GPU reset and some additional
2204*b843c749SSergey Zigachev  * steps need to be take between them.  In this case (S3/S4) they are
2205*b843c749SSergey Zigachev  * run sequentially.
2206*b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
2207*b843c749SSergey Zigachev  */
2208*b843c749SSergey Zigachev static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
2209*b843c749SSergey Zigachev {
2210*b843c749SSergey Zigachev 	int r;
2211*b843c749SSergey Zigachev 
2212*b843c749SSergey Zigachev 	r = amdgpu_device_ip_resume_phase1(adev);
2213*b843c749SSergey Zigachev 	if (r)
2214*b843c749SSergey Zigachev 		return r;
2215*b843c749SSergey Zigachev 	r = amdgpu_device_ip_resume_phase2(adev);
2216*b843c749SSergey Zigachev 
2217*b843c749SSergey Zigachev 	return r;
2218*b843c749SSergey Zigachev }
2219*b843c749SSergey Zigachev 
2220*b843c749SSergey Zigachev /**
2221*b843c749SSergey Zigachev  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
2222*b843c749SSergey Zigachev  *
2223*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
2224*b843c749SSergey Zigachev  *
2225*b843c749SSergey Zigachev  * Query the VBIOS data tables to determine if the board supports SR-IOV.
2226*b843c749SSergey Zigachev  */
2227*b843c749SSergey Zigachev static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
2228*b843c749SSergey Zigachev {
2229*b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev)) {
2230*b843c749SSergey Zigachev 		if (adev->is_atom_fw) {
2231*b843c749SSergey Zigachev 			if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2232*b843c749SSergey Zigachev 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2233*b843c749SSergey Zigachev 		} else {
2234*b843c749SSergey Zigachev 			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2235*b843c749SSergey Zigachev 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2236*b843c749SSergey Zigachev 		}
2237*b843c749SSergey Zigachev 
2238*b843c749SSergey Zigachev 		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2239*b843c749SSergey Zigachev 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
2240*b843c749SSergey Zigachev 	}
2241*b843c749SSergey Zigachev }
2242*b843c749SSergey Zigachev 
2243*b843c749SSergey Zigachev /**
2244*b843c749SSergey Zigachev  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
2245*b843c749SSergey Zigachev  *
2246*b843c749SSergey Zigachev  * @asic_type: AMD asic type
2247*b843c749SSergey Zigachev  *
2248*b843c749SSergey Zigachev  * Check if there is DC (new modesetting infrastructre) support for an asic.
2249*b843c749SSergey Zigachev  * returns true if DC has support, false if not.
2250*b843c749SSergey Zigachev  */
2251*b843c749SSergey Zigachev bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2252*b843c749SSergey Zigachev {
2253*b843c749SSergey Zigachev 	switch (asic_type) {
2254*b843c749SSergey Zigachev #if defined(CONFIG_DRM_AMD_DC)
2255*b843c749SSergey Zigachev 	case CHIP_BONAIRE:
2256*b843c749SSergey Zigachev 	case CHIP_KAVERI:
2257*b843c749SSergey Zigachev 	case CHIP_KABINI:
2258*b843c749SSergey Zigachev 	case CHIP_MULLINS:
2259*b843c749SSergey Zigachev 		/*
2260*b843c749SSergey Zigachev 		 * We have systems in the wild with these ASICs that require
2261*b843c749SSergey Zigachev 		 * LVDS and VGA support which is not supported with DC.
2262*b843c749SSergey Zigachev 		 *
2263*b843c749SSergey Zigachev 		 * Fallback to the non-DC driver here by default so as not to
2264*b843c749SSergey Zigachev 		 * cause regressions.
2265*b843c749SSergey Zigachev 		 */
2266*b843c749SSergey Zigachev 		return amdgpu_dc > 0;
2267*b843c749SSergey Zigachev 	case CHIP_HAWAII:
2268*b843c749SSergey Zigachev 	case CHIP_CARRIZO:
2269*b843c749SSergey Zigachev 	case CHIP_STONEY:
2270*b843c749SSergey Zigachev 	case CHIP_POLARIS10:
2271*b843c749SSergey Zigachev 	case CHIP_POLARIS11:
2272*b843c749SSergey Zigachev 	case CHIP_POLARIS12:
2273*b843c749SSergey Zigachev 	case CHIP_VEGAM:
2274*b843c749SSergey Zigachev 	case CHIP_TONGA:
2275*b843c749SSergey Zigachev 	case CHIP_FIJI:
2276*b843c749SSergey Zigachev 	case CHIP_VEGA10:
2277*b843c749SSergey Zigachev 	case CHIP_VEGA12:
2278*b843c749SSergey Zigachev 	case CHIP_VEGA20:
2279*b843c749SSergey Zigachev #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2280*b843c749SSergey Zigachev 	case CHIP_RAVEN:
2281*b843c749SSergey Zigachev #endif
2282*b843c749SSergey Zigachev 		return amdgpu_dc != 0;
2283*b843c749SSergey Zigachev #endif
2284*b843c749SSergey Zigachev 	default:
2285*b843c749SSergey Zigachev 		return false;
2286*b843c749SSergey Zigachev 	}
2287*b843c749SSergey Zigachev }
2288*b843c749SSergey Zigachev 
2289*b843c749SSergey Zigachev /**
2290*b843c749SSergey Zigachev  * amdgpu_device_has_dc_support - check if dc is supported
2291*b843c749SSergey Zigachev  *
2292*b843c749SSergey Zigachev  * @adev: amdgpu_device_pointer
2293*b843c749SSergey Zigachev  *
2294*b843c749SSergey Zigachev  * Returns true for supported, false for not supported
2295*b843c749SSergey Zigachev  */
2296*b843c749SSergey Zigachev bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2297*b843c749SSergey Zigachev {
2298*b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev))
2299*b843c749SSergey Zigachev 		return false;
2300*b843c749SSergey Zigachev 
2301*b843c749SSergey Zigachev 	return amdgpu_device_asic_has_dc_support(adev->asic_type);
2302*b843c749SSergey Zigachev }
2303*b843c749SSergey Zigachev 
2304*b843c749SSergey Zigachev /**
2305*b843c749SSergey Zigachev  * amdgpu_device_init - initialize the driver
2306*b843c749SSergey Zigachev  *
2307*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
2308*b843c749SSergey Zigachev  * @ddev: drm dev pointer
2309*b843c749SSergey Zigachev  * @pdev: pci dev pointer
2310*b843c749SSergey Zigachev  * @flags: driver flags
2311*b843c749SSergey Zigachev  *
2312*b843c749SSergey Zigachev  * Initializes the driver info and hw (all asics).
2313*b843c749SSergey Zigachev  * Returns 0 for success or an error on failure.
2314*b843c749SSergey Zigachev  * Called at driver startup.
2315*b843c749SSergey Zigachev  */
2316*b843c749SSergey Zigachev int amdgpu_device_init(struct amdgpu_device *adev,
2317*b843c749SSergey Zigachev 		       struct drm_device *ddev,
2318*b843c749SSergey Zigachev 		       struct pci_dev *pdev,
2319*b843c749SSergey Zigachev 		       uint32_t flags)
2320*b843c749SSergey Zigachev {
2321*b843c749SSergey Zigachev 	int r, i;
2322*b843c749SSergey Zigachev 	bool runtime = false;
2323*b843c749SSergey Zigachev 	u32 max_MBps;
2324*b843c749SSergey Zigachev 
2325*b843c749SSergey Zigachev 	adev->shutdown = false;
2326*b843c749SSergey Zigachev 	adev->dev = &pdev->dev;
2327*b843c749SSergey Zigachev 	adev->ddev = ddev;
2328*b843c749SSergey Zigachev 	adev->pdev = pdev;
2329*b843c749SSergey Zigachev 	adev->flags = flags;
2330*b843c749SSergey Zigachev 	adev->asic_type = flags & AMD_ASIC_MASK;
2331*b843c749SSergey Zigachev 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
2332*b843c749SSergey Zigachev 	if (amdgpu_emu_mode == 1)
2333*b843c749SSergey Zigachev 		adev->usec_timeout *= 2;
2334*b843c749SSergey Zigachev 	adev->gmc.gart_size = 512 * 1024 * 1024;
2335*b843c749SSergey Zigachev 	adev->accel_working = false;
2336*b843c749SSergey Zigachev 	adev->num_rings = 0;
2337*b843c749SSergey Zigachev 	adev->mman.buffer_funcs = NULL;
2338*b843c749SSergey Zigachev 	adev->mman.buffer_funcs_ring = NULL;
2339*b843c749SSergey Zigachev 	adev->vm_manager.vm_pte_funcs = NULL;
2340*b843c749SSergey Zigachev 	adev->vm_manager.vm_pte_num_rings = 0;
2341*b843c749SSergey Zigachev 	adev->gmc.gmc_funcs = NULL;
2342*b843c749SSergey Zigachev 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2343*b843c749SSergey Zigachev 	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
2344*b843c749SSergey Zigachev 
2345*b843c749SSergey Zigachev 	adev->smc_rreg = &amdgpu_invalid_rreg;
2346*b843c749SSergey Zigachev 	adev->smc_wreg = &amdgpu_invalid_wreg;
2347*b843c749SSergey Zigachev 	adev->pcie_rreg = &amdgpu_invalid_rreg;
2348*b843c749SSergey Zigachev 	adev->pcie_wreg = &amdgpu_invalid_wreg;
2349*b843c749SSergey Zigachev 	adev->pciep_rreg = &amdgpu_invalid_rreg;
2350*b843c749SSergey Zigachev 	adev->pciep_wreg = &amdgpu_invalid_wreg;
2351*b843c749SSergey Zigachev 	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2352*b843c749SSergey Zigachev 	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2353*b843c749SSergey Zigachev 	adev->didt_rreg = &amdgpu_invalid_rreg;
2354*b843c749SSergey Zigachev 	adev->didt_wreg = &amdgpu_invalid_wreg;
2355*b843c749SSergey Zigachev 	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2356*b843c749SSergey Zigachev 	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
2357*b843c749SSergey Zigachev 	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2358*b843c749SSergey Zigachev 	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2359*b843c749SSergey Zigachev 
2360*b843c749SSergey Zigachev 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2361*b843c749SSergey Zigachev 		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2362*b843c749SSergey Zigachev 		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
2363*b843c749SSergey Zigachev 
2364*b843c749SSergey Zigachev 	/* mutex initialization are all done here so we
2365*b843c749SSergey Zigachev 	 * can recall function without having locking issues */
2366*b843c749SSergey Zigachev 	atomic_set(&adev->irq.ih.lock, 0);
2367*b843c749SSergey Zigachev 	mutex_init(&adev->firmware.mutex);
2368*b843c749SSergey Zigachev 	mutex_init(&adev->pm.mutex);
2369*b843c749SSergey Zigachev 	mutex_init(&adev->gfx.gpu_clock_mutex);
2370*b843c749SSergey Zigachev 	mutex_init(&adev->srbm_mutex);
2371*b843c749SSergey Zigachev 	mutex_init(&adev->gfx.pipe_reserve_mutex);
2372*b843c749SSergey Zigachev 	mutex_init(&adev->grbm_idx_mutex);
2373*b843c749SSergey Zigachev 	mutex_init(&adev->mn_lock);
2374*b843c749SSergey Zigachev 	mutex_init(&adev->virt.vf_errors.lock);
2375*b843c749SSergey Zigachev 	hash_init(adev->mn_hash);
2376*b843c749SSergey Zigachev 	mutex_init(&adev->lock_reset);
2377*b843c749SSergey Zigachev 
2378*b843c749SSergey Zigachev 	amdgpu_device_check_arguments(adev);
2379*b843c749SSergey Zigachev 
2380*b843c749SSergey Zigachev 	spin_lock_init(&adev->mmio_idx_lock);
2381*b843c749SSergey Zigachev 	spin_lock_init(&adev->smc_idx_lock);
2382*b843c749SSergey Zigachev 	spin_lock_init(&adev->pcie_idx_lock);
2383*b843c749SSergey Zigachev 	spin_lock_init(&adev->uvd_ctx_idx_lock);
2384*b843c749SSergey Zigachev 	spin_lock_init(&adev->didt_idx_lock);
2385*b843c749SSergey Zigachev 	spin_lock_init(&adev->gc_cac_idx_lock);
2386*b843c749SSergey Zigachev 	spin_lock_init(&adev->se_cac_idx_lock);
2387*b843c749SSergey Zigachev 	spin_lock_init(&adev->audio_endpt_idx_lock);
2388*b843c749SSergey Zigachev 	spin_lock_init(&adev->mm_stats.lock);
2389*b843c749SSergey Zigachev 
2390*b843c749SSergey Zigachev 	INIT_LIST_HEAD(&adev->shadow_list);
2391*b843c749SSergey Zigachev 	mutex_init(&adev->shadow_list_lock);
2392*b843c749SSergey Zigachev 
2393*b843c749SSergey Zigachev 	INIT_LIST_HEAD(&adev->ring_lru_list);
2394*b843c749SSergey Zigachev 	spin_lock_init(&adev->ring_lru_list_lock);
2395*b843c749SSergey Zigachev 
2396*b843c749SSergey Zigachev 	INIT_DELAYED_WORK(&adev->late_init_work,
2397*b843c749SSergey Zigachev 			  amdgpu_device_ip_late_init_func_handler);
2398*b843c749SSergey Zigachev 
2399*b843c749SSergey Zigachev 	adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
2400*b843c749SSergey Zigachev 
2401*b843c749SSergey Zigachev 	/* Registers mapping */
2402*b843c749SSergey Zigachev 	/* TODO: block userspace mapping of io register */
2403*b843c749SSergey Zigachev 	if (adev->asic_type >= CHIP_BONAIRE) {
2404*b843c749SSergey Zigachev 		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2405*b843c749SSergey Zigachev 		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2406*b843c749SSergey Zigachev 	} else {
2407*b843c749SSergey Zigachev 		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2408*b843c749SSergey Zigachev 		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2409*b843c749SSergey Zigachev 	}
2410*b843c749SSergey Zigachev 
2411*b843c749SSergey Zigachev 	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2412*b843c749SSergey Zigachev 	if (adev->rmmio == NULL) {
2413*b843c749SSergey Zigachev 		return -ENOMEM;
2414*b843c749SSergey Zigachev 	}
2415*b843c749SSergey Zigachev 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2416*b843c749SSergey Zigachev 	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2417*b843c749SSergey Zigachev 
2418*b843c749SSergey Zigachev 	/* doorbell bar mapping */
2419*b843c749SSergey Zigachev 	amdgpu_device_doorbell_init(adev);
2420*b843c749SSergey Zigachev 
2421*b843c749SSergey Zigachev 	/* io port mapping */
2422*b843c749SSergey Zigachev 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2423*b843c749SSergey Zigachev 		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2424*b843c749SSergey Zigachev 			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2425*b843c749SSergey Zigachev 			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2426*b843c749SSergey Zigachev 			break;
2427*b843c749SSergey Zigachev 		}
2428*b843c749SSergey Zigachev 	}
2429*b843c749SSergey Zigachev 	if (adev->rio_mem == NULL)
2430*b843c749SSergey Zigachev 		DRM_INFO("PCI I/O BAR is not found.\n");
2431*b843c749SSergey Zigachev 
2432*b843c749SSergey Zigachev 	amdgpu_device_get_pcie_info(adev);
2433*b843c749SSergey Zigachev 
2434*b843c749SSergey Zigachev 	/* early init functions */
2435*b843c749SSergey Zigachev 	r = amdgpu_device_ip_early_init(adev);
2436*b843c749SSergey Zigachev 	if (r)
2437*b843c749SSergey Zigachev 		return r;
2438*b843c749SSergey Zigachev 
2439*b843c749SSergey Zigachev 	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
2440*b843c749SSergey Zigachev 	/* this will fail for cards that aren't VGA class devices, just
2441*b843c749SSergey Zigachev 	 * ignore it */
2442*b843c749SSergey Zigachev 	vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
2443*b843c749SSergey Zigachev 
2444*b843c749SSergey Zigachev 	if (amdgpu_device_is_px(ddev))
2445*b843c749SSergey Zigachev 		runtime = true;
2446*b843c749SSergey Zigachev 	if (!pci_is_thunderbolt_attached(adev->pdev))
2447*b843c749SSergey Zigachev 		vga_switcheroo_register_client(adev->pdev,
2448*b843c749SSergey Zigachev 					       &amdgpu_switcheroo_ops, runtime);
2449*b843c749SSergey Zigachev 	if (runtime)
2450*b843c749SSergey Zigachev 		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2451*b843c749SSergey Zigachev 
2452*b843c749SSergey Zigachev 	if (amdgpu_emu_mode == 1) {
2453*b843c749SSergey Zigachev 		/* post the asic on emulation mode */
2454*b843c749SSergey Zigachev 		emu_soc_asic_init(adev);
2455*b843c749SSergey Zigachev 		goto fence_driver_init;
2456*b843c749SSergey Zigachev 	}
2457*b843c749SSergey Zigachev 
2458*b843c749SSergey Zigachev 	/* Read BIOS */
2459*b843c749SSergey Zigachev 	if (!amdgpu_get_bios(adev)) {
2460*b843c749SSergey Zigachev 		r = -EINVAL;
2461*b843c749SSergey Zigachev 		goto failed;
2462*b843c749SSergey Zigachev 	}
2463*b843c749SSergey Zigachev 
2464*b843c749SSergey Zigachev 	r = amdgpu_atombios_init(adev);
2465*b843c749SSergey Zigachev 	if (r) {
2466*b843c749SSergey Zigachev 		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2467*b843c749SSergey Zigachev 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2468*b843c749SSergey Zigachev 		goto failed;
2469*b843c749SSergey Zigachev 	}
2470*b843c749SSergey Zigachev 
2471*b843c749SSergey Zigachev 	/* detect if we are with an SRIOV vbios */
2472*b843c749SSergey Zigachev 	amdgpu_device_detect_sriov_bios(adev);
2473*b843c749SSergey Zigachev 
2474*b843c749SSergey Zigachev 	/* Post card if necessary */
2475*b843c749SSergey Zigachev 	if (amdgpu_device_need_post(adev)) {
2476*b843c749SSergey Zigachev 		if (!adev->bios) {
2477*b843c749SSergey Zigachev 			dev_err(adev->dev, "no vBIOS found\n");
2478*b843c749SSergey Zigachev 			r = -EINVAL;
2479*b843c749SSergey Zigachev 			goto failed;
2480*b843c749SSergey Zigachev 		}
2481*b843c749SSergey Zigachev 		DRM_INFO("GPU posting now...\n");
2482*b843c749SSergey Zigachev 		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2483*b843c749SSergey Zigachev 		if (r) {
2484*b843c749SSergey Zigachev 			dev_err(adev->dev, "gpu post error!\n");
2485*b843c749SSergey Zigachev 			goto failed;
2486*b843c749SSergey Zigachev 		}
2487*b843c749SSergey Zigachev 	}
2488*b843c749SSergey Zigachev 
2489*b843c749SSergey Zigachev 	if (adev->is_atom_fw) {
2490*b843c749SSergey Zigachev 		/* Initialize clocks */
2491*b843c749SSergey Zigachev 		r = amdgpu_atomfirmware_get_clock_info(adev);
2492*b843c749SSergey Zigachev 		if (r) {
2493*b843c749SSergey Zigachev 			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
2494*b843c749SSergey Zigachev 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
2495*b843c749SSergey Zigachev 			goto failed;
2496*b843c749SSergey Zigachev 		}
2497*b843c749SSergey Zigachev 	} else {
2498*b843c749SSergey Zigachev 		/* Initialize clocks */
2499*b843c749SSergey Zigachev 		r = amdgpu_atombios_get_clock_info(adev);
2500*b843c749SSergey Zigachev 		if (r) {
2501*b843c749SSergey Zigachev 			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
2502*b843c749SSergey Zigachev 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
2503*b843c749SSergey Zigachev 			goto failed;
2504*b843c749SSergey Zigachev 		}
2505*b843c749SSergey Zigachev 		/* init i2c buses */
2506*b843c749SSergey Zigachev 		if (!amdgpu_device_has_dc_support(adev))
2507*b843c749SSergey Zigachev 			amdgpu_atombios_i2c_init(adev);
2508*b843c749SSergey Zigachev 	}
2509*b843c749SSergey Zigachev 
2510*b843c749SSergey Zigachev fence_driver_init:
2511*b843c749SSergey Zigachev 	/* Fence driver */
2512*b843c749SSergey Zigachev 	r = amdgpu_fence_driver_init(adev);
2513*b843c749SSergey Zigachev 	if (r) {
2514*b843c749SSergey Zigachev 		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
2515*b843c749SSergey Zigachev 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
2516*b843c749SSergey Zigachev 		goto failed;
2517*b843c749SSergey Zigachev 	}
2518*b843c749SSergey Zigachev 
2519*b843c749SSergey Zigachev 	/* init the mode config */
2520*b843c749SSergey Zigachev 	drm_mode_config_init(adev->ddev);
2521*b843c749SSergey Zigachev 
2522*b843c749SSergey Zigachev 	r = amdgpu_device_ip_init(adev);
2523*b843c749SSergey Zigachev 	if (r) {
2524*b843c749SSergey Zigachev 		/* failed in exclusive mode due to timeout */
2525*b843c749SSergey Zigachev 		if (amdgpu_sriov_vf(adev) &&
2526*b843c749SSergey Zigachev 		    !amdgpu_sriov_runtime(adev) &&
2527*b843c749SSergey Zigachev 		    amdgpu_virt_mmio_blocked(adev) &&
2528*b843c749SSergey Zigachev 		    !amdgpu_virt_wait_reset(adev)) {
2529*b843c749SSergey Zigachev 			dev_err(adev->dev, "VF exclusive mode timeout\n");
2530*b843c749SSergey Zigachev 			/* Don't send request since VF is inactive. */
2531*b843c749SSergey Zigachev 			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
2532*b843c749SSergey Zigachev 			adev->virt.ops = NULL;
2533*b843c749SSergey Zigachev 			r = -EAGAIN;
2534*b843c749SSergey Zigachev 			goto failed;
2535*b843c749SSergey Zigachev 		}
2536*b843c749SSergey Zigachev 		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
2537*b843c749SSergey Zigachev 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
2538*b843c749SSergey Zigachev 		goto failed;
2539*b843c749SSergey Zigachev 	}
2540*b843c749SSergey Zigachev 
2541*b843c749SSergey Zigachev 	adev->accel_working = true;
2542*b843c749SSergey Zigachev 
2543*b843c749SSergey Zigachev 	amdgpu_vm_check_compute_bug(adev);
2544*b843c749SSergey Zigachev 
2545*b843c749SSergey Zigachev 	/* Initialize the buffer migration limit. */
2546*b843c749SSergey Zigachev 	if (amdgpu_moverate >= 0)
2547*b843c749SSergey Zigachev 		max_MBps = amdgpu_moverate;
2548*b843c749SSergey Zigachev 	else
2549*b843c749SSergey Zigachev 		max_MBps = 8; /* Allow 8 MB/s. */
2550*b843c749SSergey Zigachev 	/* Get a log2 for easy divisions. */
2551*b843c749SSergey Zigachev 	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2552*b843c749SSergey Zigachev 
2553*b843c749SSergey Zigachev 	r = amdgpu_ib_pool_init(adev);
2554*b843c749SSergey Zigachev 	if (r) {
2555*b843c749SSergey Zigachev 		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2556*b843c749SSergey Zigachev 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2557*b843c749SSergey Zigachev 		goto failed;
2558*b843c749SSergey Zigachev 	}
2559*b843c749SSergey Zigachev 
2560*b843c749SSergey Zigachev 	amdgpu_fbdev_init(adev);
2561*b843c749SSergey Zigachev 
2562*b843c749SSergey Zigachev 	r = amdgpu_pm_sysfs_init(adev);
2563*b843c749SSergey Zigachev 	if (r)
2564*b843c749SSergey Zigachev 		DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2565*b843c749SSergey Zigachev 
2566*b843c749SSergey Zigachev 	r = amdgpu_debugfs_gem_init(adev);
2567*b843c749SSergey Zigachev 	if (r)
2568*b843c749SSergey Zigachev 		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
2569*b843c749SSergey Zigachev 
2570*b843c749SSergey Zigachev 	r = amdgpu_debugfs_regs_init(adev);
2571*b843c749SSergey Zigachev 	if (r)
2572*b843c749SSergey Zigachev 		DRM_ERROR("registering register debugfs failed (%d).\n", r);
2573*b843c749SSergey Zigachev 
2574*b843c749SSergey Zigachev 	r = amdgpu_debugfs_firmware_init(adev);
2575*b843c749SSergey Zigachev 	if (r)
2576*b843c749SSergey Zigachev 		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
2577*b843c749SSergey Zigachev 
2578*b843c749SSergey Zigachev 	r = amdgpu_debugfs_init(adev);
2579*b843c749SSergey Zigachev 	if (r)
2580*b843c749SSergey Zigachev 		DRM_ERROR("Creating debugfs files failed (%d).\n", r);
2581*b843c749SSergey Zigachev 
2582*b843c749SSergey Zigachev 	if ((amdgpu_testing & 1)) {
2583*b843c749SSergey Zigachev 		if (adev->accel_working)
2584*b843c749SSergey Zigachev 			amdgpu_test_moves(adev);
2585*b843c749SSergey Zigachev 		else
2586*b843c749SSergey Zigachev 			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2587*b843c749SSergey Zigachev 	}
2588*b843c749SSergey Zigachev 	if (amdgpu_benchmarking) {
2589*b843c749SSergey Zigachev 		if (adev->accel_working)
2590*b843c749SSergey Zigachev 			amdgpu_benchmark(adev, amdgpu_benchmarking);
2591*b843c749SSergey Zigachev 		else
2592*b843c749SSergey Zigachev 			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2593*b843c749SSergey Zigachev 	}
2594*b843c749SSergey Zigachev 
2595*b843c749SSergey Zigachev 	/* enable clockgating, etc. after ib tests, etc. since some blocks require
2596*b843c749SSergey Zigachev 	 * explicit gating rather than handling it automatically.
2597*b843c749SSergey Zigachev 	 */
2598*b843c749SSergey Zigachev 	r = amdgpu_device_ip_late_init(adev);
2599*b843c749SSergey Zigachev 	if (r) {
2600*b843c749SSergey Zigachev 		dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
2601*b843c749SSergey Zigachev 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
2602*b843c749SSergey Zigachev 		goto failed;
2603*b843c749SSergey Zigachev 	}
2604*b843c749SSergey Zigachev 
2605*b843c749SSergey Zigachev 	return 0;
2606*b843c749SSergey Zigachev 
2607*b843c749SSergey Zigachev failed:
2608*b843c749SSergey Zigachev 	amdgpu_vf_error_trans_all(adev);
2609*b843c749SSergey Zigachev 	if (runtime)
2610*b843c749SSergey Zigachev 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
2611*b843c749SSergey Zigachev 
2612*b843c749SSergey Zigachev 	return r;
2613*b843c749SSergey Zigachev }
2614*b843c749SSergey Zigachev 
2615*b843c749SSergey Zigachev /**
2616*b843c749SSergey Zigachev  * amdgpu_device_fini - tear down the driver
2617*b843c749SSergey Zigachev  *
2618*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
2619*b843c749SSergey Zigachev  *
2620*b843c749SSergey Zigachev  * Tear down the driver info (all asics).
2621*b843c749SSergey Zigachev  * Called at driver shutdown.
2622*b843c749SSergey Zigachev  */
2623*b843c749SSergey Zigachev void amdgpu_device_fini(struct amdgpu_device *adev)
2624*b843c749SSergey Zigachev {
2625*b843c749SSergey Zigachev 	int r;
2626*b843c749SSergey Zigachev 
2627*b843c749SSergey Zigachev 	DRM_INFO("amdgpu: finishing device.\n");
2628*b843c749SSergey Zigachev 	adev->shutdown = true;
2629*b843c749SSergey Zigachev 	/* disable all interrupts */
2630*b843c749SSergey Zigachev 	amdgpu_irq_disable_all(adev);
2631*b843c749SSergey Zigachev 	if (adev->mode_info.mode_config_initialized){
2632*b843c749SSergey Zigachev 		if (!amdgpu_device_has_dc_support(adev))
2633*b843c749SSergey Zigachev 			drm_crtc_force_disable_all(adev->ddev);
2634*b843c749SSergey Zigachev 		else
2635*b843c749SSergey Zigachev 			drm_atomic_helper_shutdown(adev->ddev);
2636*b843c749SSergey Zigachev 	}
2637*b843c749SSergey Zigachev 	amdgpu_ib_pool_fini(adev);
2638*b843c749SSergey Zigachev 	amdgpu_fence_driver_fini(adev);
2639*b843c749SSergey Zigachev 	amdgpu_pm_sysfs_fini(adev);
2640*b843c749SSergey Zigachev 	amdgpu_fbdev_fini(adev);
2641*b843c749SSergey Zigachev 	r = amdgpu_device_ip_fini(adev);
2642*b843c749SSergey Zigachev 	if (adev->firmware.gpu_info_fw) {
2643*b843c749SSergey Zigachev 		release_firmware(adev->firmware.gpu_info_fw);
2644*b843c749SSergey Zigachev 		adev->firmware.gpu_info_fw = NULL;
2645*b843c749SSergey Zigachev 	}
2646*b843c749SSergey Zigachev 	adev->accel_working = false;
2647*b843c749SSergey Zigachev 	cancel_delayed_work_sync(&adev->late_init_work);
2648*b843c749SSergey Zigachev 	/* free i2c buses */
2649*b843c749SSergey Zigachev 	if (!amdgpu_device_has_dc_support(adev))
2650*b843c749SSergey Zigachev 		amdgpu_i2c_fini(adev);
2651*b843c749SSergey Zigachev 
2652*b843c749SSergey Zigachev 	if (amdgpu_emu_mode != 1)
2653*b843c749SSergey Zigachev 		amdgpu_atombios_fini(adev);
2654*b843c749SSergey Zigachev 
2655*b843c749SSergey Zigachev 	kfree(adev->bios);
2656*b843c749SSergey Zigachev 	adev->bios = NULL;
2657*b843c749SSergey Zigachev 	if (!pci_is_thunderbolt_attached(adev->pdev))
2658*b843c749SSergey Zigachev 		vga_switcheroo_unregister_client(adev->pdev);
2659*b843c749SSergey Zigachev 	if (adev->flags & AMD_IS_PX)
2660*b843c749SSergey Zigachev 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
2661*b843c749SSergey Zigachev 	vga_client_register(adev->pdev, NULL, NULL, NULL);
2662*b843c749SSergey Zigachev 	if (adev->rio_mem)
2663*b843c749SSergey Zigachev 		pci_iounmap(adev->pdev, adev->rio_mem);
2664*b843c749SSergey Zigachev 	adev->rio_mem = NULL;
2665*b843c749SSergey Zigachev 	iounmap(adev->rmmio);
2666*b843c749SSergey Zigachev 	adev->rmmio = NULL;
2667*b843c749SSergey Zigachev 	amdgpu_device_doorbell_fini(adev);
2668*b843c749SSergey Zigachev 	amdgpu_debugfs_regs_cleanup(adev);
2669*b843c749SSergey Zigachev }
2670*b843c749SSergey Zigachev 
2671*b843c749SSergey Zigachev 
2672*b843c749SSergey Zigachev /*
2673*b843c749SSergey Zigachev  * Suspend & resume.
2674*b843c749SSergey Zigachev  */
2675*b843c749SSergey Zigachev /**
2676*b843c749SSergey Zigachev  * amdgpu_device_suspend - initiate device suspend
2677*b843c749SSergey Zigachev  *
2678*b843c749SSergey Zigachev  * @dev: drm dev pointer
2679*b843c749SSergey Zigachev  * @suspend: suspend state
2680*b843c749SSergey Zigachev  * @fbcon : notify the fbdev of suspend
2681*b843c749SSergey Zigachev  *
2682*b843c749SSergey Zigachev  * Puts the hw in the suspend state (all asics).
2683*b843c749SSergey Zigachev  * Returns 0 for success or an error on failure.
2684*b843c749SSergey Zigachev  * Called at driver suspend.
2685*b843c749SSergey Zigachev  */
2686*b843c749SSergey Zigachev int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
2687*b843c749SSergey Zigachev {
2688*b843c749SSergey Zigachev 	struct amdgpu_device *adev;
2689*b843c749SSergey Zigachev 	struct drm_crtc *crtc;
2690*b843c749SSergey Zigachev 	struct drm_connector *connector;
2691*b843c749SSergey Zigachev 	int r;
2692*b843c749SSergey Zigachev 
2693*b843c749SSergey Zigachev 	if (dev == NULL || dev->dev_private == NULL) {
2694*b843c749SSergey Zigachev 		return -ENODEV;
2695*b843c749SSergey Zigachev 	}
2696*b843c749SSergey Zigachev 
2697*b843c749SSergey Zigachev 	adev = dev->dev_private;
2698*b843c749SSergey Zigachev 
2699*b843c749SSergey Zigachev 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2700*b843c749SSergey Zigachev 		return 0;
2701*b843c749SSergey Zigachev 
2702*b843c749SSergey Zigachev 	drm_kms_helper_poll_disable(dev);
2703*b843c749SSergey Zigachev 
2704*b843c749SSergey Zigachev 	if (fbcon)
2705*b843c749SSergey Zigachev 		amdgpu_fbdev_set_suspend(adev, 1);
2706*b843c749SSergey Zigachev 
2707*b843c749SSergey Zigachev 	if (!amdgpu_device_has_dc_support(adev)) {
2708*b843c749SSergey Zigachev 		/* turn off display hw */
2709*b843c749SSergey Zigachev 		drm_modeset_lock_all(dev);
2710*b843c749SSergey Zigachev 		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2711*b843c749SSergey Zigachev 			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2712*b843c749SSergey Zigachev 		}
2713*b843c749SSergey Zigachev 		drm_modeset_unlock_all(dev);
2714*b843c749SSergey Zigachev 			/* unpin the front buffers and cursors */
2715*b843c749SSergey Zigachev 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2716*b843c749SSergey Zigachev 			struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2717*b843c749SSergey Zigachev 			struct drm_framebuffer *fb = crtc->primary->fb;
2718*b843c749SSergey Zigachev 			struct amdgpu_bo *robj;
2719*b843c749SSergey Zigachev 
2720*b843c749SSergey Zigachev 			if (amdgpu_crtc->cursor_bo) {
2721*b843c749SSergey Zigachev 				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2722*b843c749SSergey Zigachev 				r = amdgpu_bo_reserve(aobj, true);
2723*b843c749SSergey Zigachev 				if (r == 0) {
2724*b843c749SSergey Zigachev 					amdgpu_bo_unpin(aobj);
2725*b843c749SSergey Zigachev 					amdgpu_bo_unreserve(aobj);
2726*b843c749SSergey Zigachev 				}
2727*b843c749SSergey Zigachev 			}
2728*b843c749SSergey Zigachev 
2729*b843c749SSergey Zigachev 			if (fb == NULL || fb->obj[0] == NULL) {
2730*b843c749SSergey Zigachev 				continue;
2731*b843c749SSergey Zigachev 			}
2732*b843c749SSergey Zigachev 			robj = gem_to_amdgpu_bo(fb->obj[0]);
2733*b843c749SSergey Zigachev 			/* don't unpin kernel fb objects */
2734*b843c749SSergey Zigachev 			if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
2735*b843c749SSergey Zigachev 				r = amdgpu_bo_reserve(robj, true);
2736*b843c749SSergey Zigachev 				if (r == 0) {
2737*b843c749SSergey Zigachev 					amdgpu_bo_unpin(robj);
2738*b843c749SSergey Zigachev 					amdgpu_bo_unreserve(robj);
2739*b843c749SSergey Zigachev 				}
2740*b843c749SSergey Zigachev 			}
2741*b843c749SSergey Zigachev 		}
2742*b843c749SSergey Zigachev 	}
2743*b843c749SSergey Zigachev 
2744*b843c749SSergey Zigachev 	amdgpu_amdkfd_suspend(adev);
2745*b843c749SSergey Zigachev 
2746*b843c749SSergey Zigachev 	r = amdgpu_device_ip_suspend_phase1(adev);
2747*b843c749SSergey Zigachev 
2748*b843c749SSergey Zigachev 	/* evict vram memory */
2749*b843c749SSergey Zigachev 	amdgpu_bo_evict_vram(adev);
2750*b843c749SSergey Zigachev 
2751*b843c749SSergey Zigachev 	amdgpu_fence_driver_suspend(adev);
2752*b843c749SSergey Zigachev 
2753*b843c749SSergey Zigachev 	r = amdgpu_device_ip_suspend_phase2(adev);
2754*b843c749SSergey Zigachev 
2755*b843c749SSergey Zigachev 	/* evict remaining vram memory
2756*b843c749SSergey Zigachev 	 * This second call to evict vram is to evict the gart page table
2757*b843c749SSergey Zigachev 	 * using the CPU.
2758*b843c749SSergey Zigachev 	 */
2759*b843c749SSergey Zigachev 	amdgpu_bo_evict_vram(adev);
2760*b843c749SSergey Zigachev 
2761*b843c749SSergey Zigachev 	pci_save_state(dev->pdev);
2762*b843c749SSergey Zigachev 	if (suspend) {
2763*b843c749SSergey Zigachev 		/* Shut down the device */
2764*b843c749SSergey Zigachev 		pci_disable_device(dev->pdev);
2765*b843c749SSergey Zigachev 		pci_set_power_state(dev->pdev, PCI_D3hot);
2766*b843c749SSergey Zigachev 	} else {
2767*b843c749SSergey Zigachev 		r = amdgpu_asic_reset(adev);
2768*b843c749SSergey Zigachev 		if (r)
2769*b843c749SSergey Zigachev 			DRM_ERROR("amdgpu asic reset failed\n");
2770*b843c749SSergey Zigachev 	}
2771*b843c749SSergey Zigachev 
2772*b843c749SSergey Zigachev 	return 0;
2773*b843c749SSergey Zigachev }
2774*b843c749SSergey Zigachev 
2775*b843c749SSergey Zigachev /**
2776*b843c749SSergey Zigachev  * amdgpu_device_resume - initiate device resume
2777*b843c749SSergey Zigachev  *
2778*b843c749SSergey Zigachev  * @dev: drm dev pointer
2779*b843c749SSergey Zigachev  * @resume: resume state
2780*b843c749SSergey Zigachev  * @fbcon : notify the fbdev of resume
2781*b843c749SSergey Zigachev  *
2782*b843c749SSergey Zigachev  * Bring the hw back to operating state (all asics).
2783*b843c749SSergey Zigachev  * Returns 0 for success or an error on failure.
2784*b843c749SSergey Zigachev  * Called at driver resume.
2785*b843c749SSergey Zigachev  */
2786*b843c749SSergey Zigachev int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
2787*b843c749SSergey Zigachev {
2788*b843c749SSergey Zigachev 	struct drm_connector *connector;
2789*b843c749SSergey Zigachev 	struct amdgpu_device *adev = dev->dev_private;
2790*b843c749SSergey Zigachev 	struct drm_crtc *crtc;
2791*b843c749SSergey Zigachev 	int r = 0;
2792*b843c749SSergey Zigachev 
2793*b843c749SSergey Zigachev 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2794*b843c749SSergey Zigachev 		return 0;
2795*b843c749SSergey Zigachev 
2796*b843c749SSergey Zigachev 	if (resume) {
2797*b843c749SSergey Zigachev 		pci_set_power_state(dev->pdev, PCI_D0);
2798*b843c749SSergey Zigachev 		pci_restore_state(dev->pdev);
2799*b843c749SSergey Zigachev 		r = pci_enable_device(dev->pdev);
2800*b843c749SSergey Zigachev 		if (r)
2801*b843c749SSergey Zigachev 			return r;
2802*b843c749SSergey Zigachev 	}
2803*b843c749SSergey Zigachev 
2804*b843c749SSergey Zigachev 	/* post card */
2805*b843c749SSergey Zigachev 	if (amdgpu_device_need_post(adev)) {
2806*b843c749SSergey Zigachev 		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2807*b843c749SSergey Zigachev 		if (r)
2808*b843c749SSergey Zigachev 			DRM_ERROR("amdgpu asic init failed\n");
2809*b843c749SSergey Zigachev 	}
2810*b843c749SSergey Zigachev 
2811*b843c749SSergey Zigachev 	r = amdgpu_device_ip_resume(adev);
2812*b843c749SSergey Zigachev 	if (r) {
2813*b843c749SSergey Zigachev 		DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
2814*b843c749SSergey Zigachev 		return r;
2815*b843c749SSergey Zigachev 	}
2816*b843c749SSergey Zigachev 	amdgpu_fence_driver_resume(adev);
2817*b843c749SSergey Zigachev 
2818*b843c749SSergey Zigachev 
2819*b843c749SSergey Zigachev 	r = amdgpu_device_ip_late_init(adev);
2820*b843c749SSergey Zigachev 	if (r)
2821*b843c749SSergey Zigachev 		return r;
2822*b843c749SSergey Zigachev 
2823*b843c749SSergey Zigachev 	if (!amdgpu_device_has_dc_support(adev)) {
2824*b843c749SSergey Zigachev 		/* pin cursors */
2825*b843c749SSergey Zigachev 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2826*b843c749SSergey Zigachev 			struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2827*b843c749SSergey Zigachev 
2828*b843c749SSergey Zigachev 			if (amdgpu_crtc->cursor_bo) {
2829*b843c749SSergey Zigachev 				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2830*b843c749SSergey Zigachev 				r = amdgpu_bo_reserve(aobj, true);
2831*b843c749SSergey Zigachev 				if (r == 0) {
2832*b843c749SSergey Zigachev 					r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2833*b843c749SSergey Zigachev 					if (r != 0)
2834*b843c749SSergey Zigachev 						DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2835*b843c749SSergey Zigachev 					amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2836*b843c749SSergey Zigachev 					amdgpu_bo_unreserve(aobj);
2837*b843c749SSergey Zigachev 				}
2838*b843c749SSergey Zigachev 			}
2839*b843c749SSergey Zigachev 		}
2840*b843c749SSergey Zigachev 	}
2841*b843c749SSergey Zigachev 	r = amdgpu_amdkfd_resume(adev);
2842*b843c749SSergey Zigachev 	if (r)
2843*b843c749SSergey Zigachev 		return r;
2844*b843c749SSergey Zigachev 
2845*b843c749SSergey Zigachev 	/* Make sure IB tests flushed */
2846*b843c749SSergey Zigachev 	flush_delayed_work(&adev->late_init_work);
2847*b843c749SSergey Zigachev 
2848*b843c749SSergey Zigachev 	/* blat the mode back in */
2849*b843c749SSergey Zigachev 	if (fbcon) {
2850*b843c749SSergey Zigachev 		if (!amdgpu_device_has_dc_support(adev)) {
2851*b843c749SSergey Zigachev 			/* pre DCE11 */
2852*b843c749SSergey Zigachev 			drm_helper_resume_force_mode(dev);
2853*b843c749SSergey Zigachev 
2854*b843c749SSergey Zigachev 			/* turn on display hw */
2855*b843c749SSergey Zigachev 			drm_modeset_lock_all(dev);
2856*b843c749SSergey Zigachev 			list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2857*b843c749SSergey Zigachev 				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2858*b843c749SSergey Zigachev 			}
2859*b843c749SSergey Zigachev 			drm_modeset_unlock_all(dev);
2860*b843c749SSergey Zigachev 		}
2861*b843c749SSergey Zigachev 		amdgpu_fbdev_set_suspend(adev, 0);
2862*b843c749SSergey Zigachev 	}
2863*b843c749SSergey Zigachev 
2864*b843c749SSergey Zigachev 	drm_kms_helper_poll_enable(dev);
2865*b843c749SSergey Zigachev 
2866*b843c749SSergey Zigachev 	/*
2867*b843c749SSergey Zigachev 	 * Most of the connector probing functions try to acquire runtime pm
2868*b843c749SSergey Zigachev 	 * refs to ensure that the GPU is powered on when connector polling is
2869*b843c749SSergey Zigachev 	 * performed. Since we're calling this from a runtime PM callback,
2870*b843c749SSergey Zigachev 	 * trying to acquire rpm refs will cause us to deadlock.
2871*b843c749SSergey Zigachev 	 *
2872*b843c749SSergey Zigachev 	 * Since we're guaranteed to be holding the rpm lock, it's safe to
2873*b843c749SSergey Zigachev 	 * temporarily disable the rpm helpers so this doesn't deadlock us.
2874*b843c749SSergey Zigachev 	 */
2875*b843c749SSergey Zigachev #ifdef CONFIG_PM
2876*b843c749SSergey Zigachev 	dev->dev->power.disable_depth++;
2877*b843c749SSergey Zigachev #endif
2878*b843c749SSergey Zigachev 	if (!amdgpu_device_has_dc_support(adev))
2879*b843c749SSergey Zigachev 		drm_helper_hpd_irq_event(dev);
2880*b843c749SSergey Zigachev 	else
2881*b843c749SSergey Zigachev 		drm_kms_helper_hotplug_event(dev);
2882*b843c749SSergey Zigachev #ifdef CONFIG_PM
2883*b843c749SSergey Zigachev 	dev->dev->power.disable_depth--;
2884*b843c749SSergey Zigachev #endif
2885*b843c749SSergey Zigachev 	return 0;
2886*b843c749SSergey Zigachev }
2887*b843c749SSergey Zigachev 
2888*b843c749SSergey Zigachev /**
2889*b843c749SSergey Zigachev  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
2890*b843c749SSergey Zigachev  *
2891*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
2892*b843c749SSergey Zigachev  *
2893*b843c749SSergey Zigachev  * The list of all the hardware IPs that make up the asic is walked and
2894*b843c749SSergey Zigachev  * the check_soft_reset callbacks are run.  check_soft_reset determines
2895*b843c749SSergey Zigachev  * if the asic is still hung or not.
2896*b843c749SSergey Zigachev  * Returns true if any of the IPs are still in a hung state, false if not.
2897*b843c749SSergey Zigachev  */
2898*b843c749SSergey Zigachev static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
2899*b843c749SSergey Zigachev {
2900*b843c749SSergey Zigachev 	int i;
2901*b843c749SSergey Zigachev 	bool asic_hang = false;
2902*b843c749SSergey Zigachev 
2903*b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev))
2904*b843c749SSergey Zigachev 		return true;
2905*b843c749SSergey Zigachev 
2906*b843c749SSergey Zigachev 	if (amdgpu_asic_need_full_reset(adev))
2907*b843c749SSergey Zigachev 		return true;
2908*b843c749SSergey Zigachev 
2909*b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
2910*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
2911*b843c749SSergey Zigachev 			continue;
2912*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2913*b843c749SSergey Zigachev 			adev->ip_blocks[i].status.hang =
2914*b843c749SSergey Zigachev 				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2915*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].status.hang) {
2916*b843c749SSergey Zigachev 			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
2917*b843c749SSergey Zigachev 			asic_hang = true;
2918*b843c749SSergey Zigachev 		}
2919*b843c749SSergey Zigachev 	}
2920*b843c749SSergey Zigachev 	return asic_hang;
2921*b843c749SSergey Zigachev }
2922*b843c749SSergey Zigachev 
2923*b843c749SSergey Zigachev /**
2924*b843c749SSergey Zigachev  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
2925*b843c749SSergey Zigachev  *
2926*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
2927*b843c749SSergey Zigachev  *
2928*b843c749SSergey Zigachev  * The list of all the hardware IPs that make up the asic is walked and the
2929*b843c749SSergey Zigachev  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
2930*b843c749SSergey Zigachev  * handles any IP specific hardware or software state changes that are
2931*b843c749SSergey Zigachev  * necessary for a soft reset to succeed.
2932*b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
2933*b843c749SSergey Zigachev  */
2934*b843c749SSergey Zigachev static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
2935*b843c749SSergey Zigachev {
2936*b843c749SSergey Zigachev 	int i, r = 0;
2937*b843c749SSergey Zigachev 
2938*b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
2939*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
2940*b843c749SSergey Zigachev 			continue;
2941*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].status.hang &&
2942*b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2943*b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
2944*b843c749SSergey Zigachev 			if (r)
2945*b843c749SSergey Zigachev 				return r;
2946*b843c749SSergey Zigachev 		}
2947*b843c749SSergey Zigachev 	}
2948*b843c749SSergey Zigachev 
2949*b843c749SSergey Zigachev 	return 0;
2950*b843c749SSergey Zigachev }
2951*b843c749SSergey Zigachev 
2952*b843c749SSergey Zigachev /**
2953*b843c749SSergey Zigachev  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
2954*b843c749SSergey Zigachev  *
2955*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
2956*b843c749SSergey Zigachev  *
2957*b843c749SSergey Zigachev  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
2958*b843c749SSergey Zigachev  * reset is necessary to recover.
2959*b843c749SSergey Zigachev  * Returns true if a full asic reset is required, false if not.
2960*b843c749SSergey Zigachev  */
2961*b843c749SSergey Zigachev static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
2962*b843c749SSergey Zigachev {
2963*b843c749SSergey Zigachev 	int i;
2964*b843c749SSergey Zigachev 
2965*b843c749SSergey Zigachev 	if (amdgpu_asic_need_full_reset(adev))
2966*b843c749SSergey Zigachev 		return true;
2967*b843c749SSergey Zigachev 
2968*b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
2969*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
2970*b843c749SSergey Zigachev 			continue;
2971*b843c749SSergey Zigachev 		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2972*b843c749SSergey Zigachev 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2973*b843c749SSergey Zigachev 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
2974*b843c749SSergey Zigachev 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
2975*b843c749SSergey Zigachev 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2976*b843c749SSergey Zigachev 			if (adev->ip_blocks[i].status.hang) {
2977*b843c749SSergey Zigachev 				DRM_INFO("Some block need full reset!\n");
2978*b843c749SSergey Zigachev 				return true;
2979*b843c749SSergey Zigachev 			}
2980*b843c749SSergey Zigachev 		}
2981*b843c749SSergey Zigachev 	}
2982*b843c749SSergey Zigachev 	return false;
2983*b843c749SSergey Zigachev }
2984*b843c749SSergey Zigachev 
2985*b843c749SSergey Zigachev /**
2986*b843c749SSergey Zigachev  * amdgpu_device_ip_soft_reset - do a soft reset
2987*b843c749SSergey Zigachev  *
2988*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
2989*b843c749SSergey Zigachev  *
2990*b843c749SSergey Zigachev  * The list of all the hardware IPs that make up the asic is walked and the
2991*b843c749SSergey Zigachev  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
2992*b843c749SSergey Zigachev  * IP specific hardware or software state changes that are necessary to soft
2993*b843c749SSergey Zigachev  * reset the IP.
2994*b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
2995*b843c749SSergey Zigachev  */
2996*b843c749SSergey Zigachev static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
2997*b843c749SSergey Zigachev {
2998*b843c749SSergey Zigachev 	int i, r = 0;
2999*b843c749SSergey Zigachev 
3000*b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
3001*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
3002*b843c749SSergey Zigachev 			continue;
3003*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].status.hang &&
3004*b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->funcs->soft_reset) {
3005*b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
3006*b843c749SSergey Zigachev 			if (r)
3007*b843c749SSergey Zigachev 				return r;
3008*b843c749SSergey Zigachev 		}
3009*b843c749SSergey Zigachev 	}
3010*b843c749SSergey Zigachev 
3011*b843c749SSergey Zigachev 	return 0;
3012*b843c749SSergey Zigachev }
3013*b843c749SSergey Zigachev 
3014*b843c749SSergey Zigachev /**
3015*b843c749SSergey Zigachev  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
3016*b843c749SSergey Zigachev  *
3017*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
3018*b843c749SSergey Zigachev  *
3019*b843c749SSergey Zigachev  * The list of all the hardware IPs that make up the asic is walked and the
3020*b843c749SSergey Zigachev  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
3021*b843c749SSergey Zigachev  * handles any IP specific hardware or software state changes that are
3022*b843c749SSergey Zigachev  * necessary after the IP has been soft reset.
3023*b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
3024*b843c749SSergey Zigachev  */
3025*b843c749SSergey Zigachev static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
3026*b843c749SSergey Zigachev {
3027*b843c749SSergey Zigachev 	int i, r = 0;
3028*b843c749SSergey Zigachev 
3029*b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
3030*b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
3031*b843c749SSergey Zigachev 			continue;
3032*b843c749SSergey Zigachev 		if (adev->ip_blocks[i].status.hang &&
3033*b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->funcs->post_soft_reset)
3034*b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
3035*b843c749SSergey Zigachev 		if (r)
3036*b843c749SSergey Zigachev 			return r;
3037*b843c749SSergey Zigachev 	}
3038*b843c749SSergey Zigachev 
3039*b843c749SSergey Zigachev 	return 0;
3040*b843c749SSergey Zigachev }
3041*b843c749SSergey Zigachev 
3042*b843c749SSergey Zigachev /**
3043*b843c749SSergey Zigachev  * amdgpu_device_recover_vram_from_shadow - restore shadowed VRAM buffers
3044*b843c749SSergey Zigachev  *
3045*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
3046*b843c749SSergey Zigachev  * @ring: amdgpu_ring for the engine handling the buffer operations
3047*b843c749SSergey Zigachev  * @bo: amdgpu_bo buffer whose shadow is being restored
3048*b843c749SSergey Zigachev  * @fence: dma_fence associated with the operation
3049*b843c749SSergey Zigachev  *
3050*b843c749SSergey Zigachev  * Restores the VRAM buffer contents from the shadow in GTT.  Used to
3051*b843c749SSergey Zigachev  * restore things like GPUVM page tables after a GPU reset where
3052*b843c749SSergey Zigachev  * the contents of VRAM might be lost.
3053*b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
3054*b843c749SSergey Zigachev  */
3055*b843c749SSergey Zigachev static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
3056*b843c749SSergey Zigachev 						  struct amdgpu_ring *ring,
3057*b843c749SSergey Zigachev 						  struct amdgpu_bo *bo,
3058*b843c749SSergey Zigachev 						  struct dma_fence **fence)
3059*b843c749SSergey Zigachev {
3060*b843c749SSergey Zigachev 	uint32_t domain;
3061*b843c749SSergey Zigachev 	int r;
3062*b843c749SSergey Zigachev 
3063*b843c749SSergey Zigachev 	if (!bo->shadow)
3064*b843c749SSergey Zigachev 		return 0;
3065*b843c749SSergey Zigachev 
3066*b843c749SSergey Zigachev 	r = amdgpu_bo_reserve(bo, true);
3067*b843c749SSergey Zigachev 	if (r)
3068*b843c749SSergey Zigachev 		return r;
3069*b843c749SSergey Zigachev 	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
3070*b843c749SSergey Zigachev 	/* if bo has been evicted, then no need to recover */
3071*b843c749SSergey Zigachev 	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
3072*b843c749SSergey Zigachev 		r = amdgpu_bo_validate(bo->shadow);
3073*b843c749SSergey Zigachev 		if (r) {
3074*b843c749SSergey Zigachev 			DRM_ERROR("bo validate failed!\n");
3075*b843c749SSergey Zigachev 			goto err;
3076*b843c749SSergey Zigachev 		}
3077*b843c749SSergey Zigachev 
3078*b843c749SSergey Zigachev 		r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
3079*b843c749SSergey Zigachev 						 NULL, fence, true);
3080*b843c749SSergey Zigachev 		if (r) {
3081*b843c749SSergey Zigachev 			DRM_ERROR("recover page table failed!\n");
3082*b843c749SSergey Zigachev 			goto err;
3083*b843c749SSergey Zigachev 		}
3084*b843c749SSergey Zigachev 	}
3085*b843c749SSergey Zigachev err:
3086*b843c749SSergey Zigachev 	amdgpu_bo_unreserve(bo);
3087*b843c749SSergey Zigachev 	return r;
3088*b843c749SSergey Zigachev }
3089*b843c749SSergey Zigachev 
3090*b843c749SSergey Zigachev /**
3091*b843c749SSergey Zigachev  * amdgpu_device_handle_vram_lost - Handle the loss of VRAM contents
3092*b843c749SSergey Zigachev  *
3093*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
3094*b843c749SSergey Zigachev  *
3095*b843c749SSergey Zigachev  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
3096*b843c749SSergey Zigachev  * restore things like GPUVM page tables after a GPU reset where
3097*b843c749SSergey Zigachev  * the contents of VRAM might be lost.
3098*b843c749SSergey Zigachev  * Returns 0 on success, 1 on failure.
3099*b843c749SSergey Zigachev  */
3100*b843c749SSergey Zigachev static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
3101*b843c749SSergey Zigachev {
3102*b843c749SSergey Zigachev 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
3103*b843c749SSergey Zigachev 	struct amdgpu_bo *bo, *tmp;
3104*b843c749SSergey Zigachev 	struct dma_fence *fence = NULL, *next = NULL;
3105*b843c749SSergey Zigachev 	long r = 1;
3106*b843c749SSergey Zigachev 	int i = 0;
3107*b843c749SSergey Zigachev 	long tmo;
3108*b843c749SSergey Zigachev 
3109*b843c749SSergey Zigachev 	if (amdgpu_sriov_runtime(adev))
3110*b843c749SSergey Zigachev 		tmo = msecs_to_jiffies(8000);
3111*b843c749SSergey Zigachev 	else
3112*b843c749SSergey Zigachev 		tmo = msecs_to_jiffies(100);
3113*b843c749SSergey Zigachev 
3114*b843c749SSergey Zigachev 	DRM_INFO("recover vram bo from shadow start\n");
3115*b843c749SSergey Zigachev 	mutex_lock(&adev->shadow_list_lock);
3116*b843c749SSergey Zigachev 	list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
3117*b843c749SSergey Zigachev 		next = NULL;
3118*b843c749SSergey Zigachev 		amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
3119*b843c749SSergey Zigachev 		if (fence) {
3120*b843c749SSergey Zigachev 			r = dma_fence_wait_timeout(fence, false, tmo);
3121*b843c749SSergey Zigachev 			if (r == 0)
3122*b843c749SSergey Zigachev 				pr_err("wait fence %p[%d] timeout\n", fence, i);
3123*b843c749SSergey Zigachev 			else if (r < 0)
3124*b843c749SSergey Zigachev 				pr_err("wait fence %p[%d] interrupted\n", fence, i);
3125*b843c749SSergey Zigachev 			if (r < 1) {
3126*b843c749SSergey Zigachev 				dma_fence_put(fence);
3127*b843c749SSergey Zigachev 				fence = next;
3128*b843c749SSergey Zigachev 				break;
3129*b843c749SSergey Zigachev 			}
3130*b843c749SSergey Zigachev 			i++;
3131*b843c749SSergey Zigachev 		}
3132*b843c749SSergey Zigachev 
3133*b843c749SSergey Zigachev 		dma_fence_put(fence);
3134*b843c749SSergey Zigachev 		fence = next;
3135*b843c749SSergey Zigachev 	}
3136*b843c749SSergey Zigachev 	mutex_unlock(&adev->shadow_list_lock);
3137*b843c749SSergey Zigachev 
3138*b843c749SSergey Zigachev 	if (fence) {
3139*b843c749SSergey Zigachev 		r = dma_fence_wait_timeout(fence, false, tmo);
3140*b843c749SSergey Zigachev 		if (r == 0)
3141*b843c749SSergey Zigachev 			pr_err("wait fence %p[%d] timeout\n", fence, i);
3142*b843c749SSergey Zigachev 		else if (r < 0)
3143*b843c749SSergey Zigachev 			pr_err("wait fence %p[%d] interrupted\n", fence, i);
3144*b843c749SSergey Zigachev 
3145*b843c749SSergey Zigachev 	}
3146*b843c749SSergey Zigachev 	dma_fence_put(fence);
3147*b843c749SSergey Zigachev 
3148*b843c749SSergey Zigachev 	if (r > 0)
3149*b843c749SSergey Zigachev 		DRM_INFO("recover vram bo from shadow done\n");
3150*b843c749SSergey Zigachev 	else
3151*b843c749SSergey Zigachev 		DRM_ERROR("recover vram bo from shadow failed\n");
3152*b843c749SSergey Zigachev 
3153*b843c749SSergey Zigachev 	return (r > 0) ? 0 : 1;
3154*b843c749SSergey Zigachev }
3155*b843c749SSergey Zigachev 
3156*b843c749SSergey Zigachev /**
3157*b843c749SSergey Zigachev  * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
3158*b843c749SSergey Zigachev  *
3159*b843c749SSergey Zigachev  * @adev: amdgpu device pointer
3160*b843c749SSergey Zigachev  *
3161*b843c749SSergey Zigachev  * attempt to do soft-reset or full-reset and reinitialize Asic
3162*b843c749SSergey Zigachev  * return 0 means succeeded otherwise failed
3163*b843c749SSergey Zigachev  */
3164*b843c749SSergey Zigachev static int amdgpu_device_reset(struct amdgpu_device *adev)
3165*b843c749SSergey Zigachev {
3166*b843c749SSergey Zigachev 	bool need_full_reset, vram_lost = 0;
3167*b843c749SSergey Zigachev 	int r;
3168*b843c749SSergey Zigachev 
3169*b843c749SSergey Zigachev 	need_full_reset = amdgpu_device_ip_need_full_reset(adev);
3170*b843c749SSergey Zigachev 
3171*b843c749SSergey Zigachev 	if (!need_full_reset) {
3172*b843c749SSergey Zigachev 		amdgpu_device_ip_pre_soft_reset(adev);
3173*b843c749SSergey Zigachev 		r = amdgpu_device_ip_soft_reset(adev);
3174*b843c749SSergey Zigachev 		amdgpu_device_ip_post_soft_reset(adev);
3175*b843c749SSergey Zigachev 		if (r || amdgpu_device_ip_check_soft_reset(adev)) {
3176*b843c749SSergey Zigachev 			DRM_INFO("soft reset failed, will fallback to full reset!\n");
3177*b843c749SSergey Zigachev 			need_full_reset = true;
3178*b843c749SSergey Zigachev 		}
3179*b843c749SSergey Zigachev 	}
3180*b843c749SSergey Zigachev 
3181*b843c749SSergey Zigachev 	if (need_full_reset) {
3182*b843c749SSergey Zigachev 		r = amdgpu_device_ip_suspend(adev);
3183*b843c749SSergey Zigachev 
3184*b843c749SSergey Zigachev retry:
3185*b843c749SSergey Zigachev 		r = amdgpu_asic_reset(adev);
3186*b843c749SSergey Zigachev 		/* post card */
3187*b843c749SSergey Zigachev 		amdgpu_atom_asic_init(adev->mode_info.atom_context);
3188*b843c749SSergey Zigachev 
3189*b843c749SSergey Zigachev 		if (!r) {
3190*b843c749SSergey Zigachev 			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
3191*b843c749SSergey Zigachev 			r = amdgpu_device_ip_resume_phase1(adev);
3192*b843c749SSergey Zigachev 			if (r)
3193*b843c749SSergey Zigachev 				goto out;
3194*b843c749SSergey Zigachev 
3195*b843c749SSergey Zigachev 			vram_lost = amdgpu_device_check_vram_lost(adev);
3196*b843c749SSergey Zigachev 			if (vram_lost) {
3197*b843c749SSergey Zigachev 				DRM_ERROR("VRAM is lost!\n");
3198*b843c749SSergey Zigachev 				atomic_inc(&adev->vram_lost_counter);
3199*b843c749SSergey Zigachev 			}
3200*b843c749SSergey Zigachev 
3201*b843c749SSergey Zigachev 			r = amdgpu_gtt_mgr_recover(
3202*b843c749SSergey Zigachev 				&adev->mman.bdev.man[TTM_PL_TT]);
3203*b843c749SSergey Zigachev 			if (r)
3204*b843c749SSergey Zigachev 				goto out;
3205*b843c749SSergey Zigachev 
3206*b843c749SSergey Zigachev 			r = amdgpu_device_ip_resume_phase2(adev);
3207*b843c749SSergey Zigachev 			if (r)
3208*b843c749SSergey Zigachev 				goto out;
3209*b843c749SSergey Zigachev 
3210*b843c749SSergey Zigachev 			if (vram_lost)
3211*b843c749SSergey Zigachev 				amdgpu_device_fill_reset_magic(adev);
3212*b843c749SSergey Zigachev 		}
3213*b843c749SSergey Zigachev 	}
3214*b843c749SSergey Zigachev 
3215*b843c749SSergey Zigachev out:
3216*b843c749SSergey Zigachev 	if (!r) {
3217*b843c749SSergey Zigachev 		amdgpu_irq_gpu_reset_resume_helper(adev);
3218*b843c749SSergey Zigachev 		r = amdgpu_ib_ring_tests(adev);
3219*b843c749SSergey Zigachev 		if (r) {
3220*b843c749SSergey Zigachev 			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
3221*b843c749SSergey Zigachev 			r = amdgpu_device_ip_suspend(adev);
3222*b843c749SSergey Zigachev 			need_full_reset = true;
3223*b843c749SSergey Zigachev 			goto retry;
3224*b843c749SSergey Zigachev 		}
3225*b843c749SSergey Zigachev 	}
3226*b843c749SSergey Zigachev 
3227*b843c749SSergey Zigachev 	if (!r && ((need_full_reset && !(adev->flags & AMD_IS_APU)) || vram_lost))
3228*b843c749SSergey Zigachev 		r = amdgpu_device_handle_vram_lost(adev);
3229*b843c749SSergey Zigachev 
3230*b843c749SSergey Zigachev 	return r;
3231*b843c749SSergey Zigachev }
3232*b843c749SSergey Zigachev 
3233*b843c749SSergey Zigachev /**
3234*b843c749SSergey Zigachev  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
3235*b843c749SSergey Zigachev  *
3236*b843c749SSergey Zigachev  * @adev: amdgpu device pointer
3237*b843c749SSergey Zigachev  * @from_hypervisor: request from hypervisor
3238*b843c749SSergey Zigachev  *
3239*b843c749SSergey Zigachev  * do VF FLR and reinitialize Asic
3240*b843c749SSergey Zigachev  * return 0 means succeeded otherwise failed
3241*b843c749SSergey Zigachev  */
3242*b843c749SSergey Zigachev static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3243*b843c749SSergey Zigachev 				     bool from_hypervisor)
3244*b843c749SSergey Zigachev {
3245*b843c749SSergey Zigachev 	int r;
3246*b843c749SSergey Zigachev 
3247*b843c749SSergey Zigachev 	if (from_hypervisor)
3248*b843c749SSergey Zigachev 		r = amdgpu_virt_request_full_gpu(adev, true);
3249*b843c749SSergey Zigachev 	else
3250*b843c749SSergey Zigachev 		r = amdgpu_virt_reset_gpu(adev);
3251*b843c749SSergey Zigachev 	if (r)
3252*b843c749SSergey Zigachev 		return r;
3253*b843c749SSergey Zigachev 
3254*b843c749SSergey Zigachev 	/* Resume IP prior to SMC */
3255*b843c749SSergey Zigachev 	r = amdgpu_device_ip_reinit_early_sriov(adev);
3256*b843c749SSergey Zigachev 	if (r)
3257*b843c749SSergey Zigachev 		goto error;
3258*b843c749SSergey Zigachev 
3259*b843c749SSergey Zigachev 	/* we need recover gart prior to run SMC/CP/SDMA resume */
3260*b843c749SSergey Zigachev 	amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
3261*b843c749SSergey Zigachev 
3262*b843c749SSergey Zigachev 	/* now we are okay to resume SMC/CP/SDMA */
3263*b843c749SSergey Zigachev 	r = amdgpu_device_ip_reinit_late_sriov(adev);
3264*b843c749SSergey Zigachev 	if (r)
3265*b843c749SSergey Zigachev 		goto error;
3266*b843c749SSergey Zigachev 
3267*b843c749SSergey Zigachev 	amdgpu_irq_gpu_reset_resume_helper(adev);
3268*b843c749SSergey Zigachev 	r = amdgpu_ib_ring_tests(adev);
3269*b843c749SSergey Zigachev 
3270*b843c749SSergey Zigachev error:
3271*b843c749SSergey Zigachev 	amdgpu_virt_init_data_exchange(adev);
3272*b843c749SSergey Zigachev 	amdgpu_virt_release_full_gpu(adev, true);
3273*b843c749SSergey Zigachev 	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
3274*b843c749SSergey Zigachev 		atomic_inc(&adev->vram_lost_counter);
3275*b843c749SSergey Zigachev 		r = amdgpu_device_handle_vram_lost(adev);
3276*b843c749SSergey Zigachev 	}
3277*b843c749SSergey Zigachev 
3278*b843c749SSergey Zigachev 	return r;
3279*b843c749SSergey Zigachev }
3280*b843c749SSergey Zigachev 
3281*b843c749SSergey Zigachev /**
3282*b843c749SSergey Zigachev  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
3283*b843c749SSergey Zigachev  *
3284*b843c749SSergey Zigachev  * @adev: amdgpu device pointer
3285*b843c749SSergey Zigachev  * @job: which job trigger hang
3286*b843c749SSergey Zigachev  * @force: forces reset regardless of amdgpu_gpu_recovery
3287*b843c749SSergey Zigachev  *
3288*b843c749SSergey Zigachev  * Attempt to reset the GPU if it has hung (all asics).
3289*b843c749SSergey Zigachev  * Returns 0 for success or an error on failure.
3290*b843c749SSergey Zigachev  */
3291*b843c749SSergey Zigachev int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
3292*b843c749SSergey Zigachev 			      struct amdgpu_job *job, bool force)
3293*b843c749SSergey Zigachev {
3294*b843c749SSergey Zigachev 	int i, r, resched;
3295*b843c749SSergey Zigachev 
3296*b843c749SSergey Zigachev 	if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
3297*b843c749SSergey Zigachev 		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
3298*b843c749SSergey Zigachev 		return 0;
3299*b843c749SSergey Zigachev 	}
3300*b843c749SSergey Zigachev 
3301*b843c749SSergey Zigachev 	if (!force && (amdgpu_gpu_recovery == 0 ||
3302*b843c749SSergey Zigachev 			(amdgpu_gpu_recovery == -1  && !amdgpu_sriov_vf(adev)))) {
3303*b843c749SSergey Zigachev 		DRM_INFO("GPU recovery disabled.\n");
3304*b843c749SSergey Zigachev 		return 0;
3305*b843c749SSergey Zigachev 	}
3306*b843c749SSergey Zigachev 
3307*b843c749SSergey Zigachev 	dev_info(adev->dev, "GPU reset begin!\n");
3308*b843c749SSergey Zigachev 
3309*b843c749SSergey Zigachev 	mutex_lock(&adev->lock_reset);
3310*b843c749SSergey Zigachev 	atomic_inc(&adev->gpu_reset_counter);
3311*b843c749SSergey Zigachev 	adev->in_gpu_reset = 1;
3312*b843c749SSergey Zigachev 
3313*b843c749SSergey Zigachev 	/* Block kfd */
3314*b843c749SSergey Zigachev 	amdgpu_amdkfd_pre_reset(adev);
3315*b843c749SSergey Zigachev 
3316*b843c749SSergey Zigachev 	/* block TTM */
3317*b843c749SSergey Zigachev 	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3318*b843c749SSergey Zigachev 
3319*b843c749SSergey Zigachev 	/* block all schedulers and reset given job's ring */
3320*b843c749SSergey Zigachev 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3321*b843c749SSergey Zigachev 		struct amdgpu_ring *ring = adev->rings[i];
3322*b843c749SSergey Zigachev 
3323*b843c749SSergey Zigachev 		if (!ring || !ring->sched.thread)
3324*b843c749SSergey Zigachev 			continue;
3325*b843c749SSergey Zigachev 
3326*b843c749SSergey Zigachev 		kthread_park(ring->sched.thread);
3327*b843c749SSergey Zigachev 
3328*b843c749SSergey Zigachev 		if (job && job->base.sched == &ring->sched)
3329*b843c749SSergey Zigachev 			continue;
3330*b843c749SSergey Zigachev 
3331*b843c749SSergey Zigachev 		drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
3332*b843c749SSergey Zigachev 
3333*b843c749SSergey Zigachev 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
3334*b843c749SSergey Zigachev 		amdgpu_fence_driver_force_completion(ring);
3335*b843c749SSergey Zigachev 	}
3336*b843c749SSergey Zigachev 
3337*b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev))
3338*b843c749SSergey Zigachev 		r = amdgpu_device_reset_sriov(adev, job ? false : true);
3339*b843c749SSergey Zigachev 	else
3340*b843c749SSergey Zigachev 		r = amdgpu_device_reset(adev);
3341*b843c749SSergey Zigachev 
3342*b843c749SSergey Zigachev 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3343*b843c749SSergey Zigachev 		struct amdgpu_ring *ring = adev->rings[i];
3344*b843c749SSergey Zigachev 
3345*b843c749SSergey Zigachev 		if (!ring || !ring->sched.thread)
3346*b843c749SSergey Zigachev 			continue;
3347*b843c749SSergey Zigachev 
3348*b843c749SSergey Zigachev 		/* only need recovery sched of the given job's ring
3349*b843c749SSergey Zigachev 		 * or all rings (in the case @job is NULL)
3350*b843c749SSergey Zigachev 		 * after above amdgpu_reset accomplished
3351*b843c749SSergey Zigachev 		 */
3352*b843c749SSergey Zigachev 		if ((!job || job->base.sched == &ring->sched) && !r)
3353*b843c749SSergey Zigachev 			drm_sched_job_recovery(&ring->sched);
3354*b843c749SSergey Zigachev 
3355*b843c749SSergey Zigachev 		kthread_unpark(ring->sched.thread);
3356*b843c749SSergey Zigachev 	}
3357*b843c749SSergey Zigachev 
3358*b843c749SSergey Zigachev 	if (!amdgpu_device_has_dc_support(adev)) {
3359*b843c749SSergey Zigachev 		drm_helper_resume_force_mode(adev->ddev);
3360*b843c749SSergey Zigachev 	}
3361*b843c749SSergey Zigachev 
3362*b843c749SSergey Zigachev 	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
3363*b843c749SSergey Zigachev 
3364*b843c749SSergey Zigachev 	if (r) {
3365*b843c749SSergey Zigachev 		/* bad news, how to tell it to userspace ? */
3366*b843c749SSergey Zigachev 		dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
3367*b843c749SSergey Zigachev 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
3368*b843c749SSergey Zigachev 	} else {
3369*b843c749SSergey Zigachev 		dev_info(adev->dev, "GPU reset(%d) succeeded!\n",atomic_read(&adev->gpu_reset_counter));
3370*b843c749SSergey Zigachev 	}
3371*b843c749SSergey Zigachev 
3372*b843c749SSergey Zigachev 	/*unlock kfd */
3373*b843c749SSergey Zigachev 	amdgpu_amdkfd_post_reset(adev);
3374*b843c749SSergey Zigachev 	amdgpu_vf_error_trans_all(adev);
3375*b843c749SSergey Zigachev 	adev->in_gpu_reset = 0;
3376*b843c749SSergey Zigachev 	mutex_unlock(&adev->lock_reset);
3377*b843c749SSergey Zigachev 	return r;
3378*b843c749SSergey Zigachev }
3379*b843c749SSergey Zigachev 
3380*b843c749SSergey Zigachev /**
3381*b843c749SSergey Zigachev  * amdgpu_device_get_pcie_info - fence pcie info about the PCIE slot
3382*b843c749SSergey Zigachev  *
3383*b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
3384*b843c749SSergey Zigachev  *
3385*b843c749SSergey Zigachev  * Fetchs and stores in the driver the PCIE capabilities (gen speed
3386*b843c749SSergey Zigachev  * and lanes) of the slot the device is in. Handles APUs and
3387*b843c749SSergey Zigachev  * virtualized environments where PCIE config space may not be available.
3388*b843c749SSergey Zigachev  */
3389*b843c749SSergey Zigachev static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
3390*b843c749SSergey Zigachev {
3391*b843c749SSergey Zigachev 	struct pci_dev *pdev;
3392*b843c749SSergey Zigachev 	enum pci_bus_speed speed_cap;
3393*b843c749SSergey Zigachev 	enum pcie_link_width link_width;
3394*b843c749SSergey Zigachev 
3395*b843c749SSergey Zigachev 	if (amdgpu_pcie_gen_cap)
3396*b843c749SSergey Zigachev 		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
3397*b843c749SSergey Zigachev 
3398*b843c749SSergey Zigachev 	if (amdgpu_pcie_lane_cap)
3399*b843c749SSergey Zigachev 		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
3400*b843c749SSergey Zigachev 
3401*b843c749SSergey Zigachev 	/* covers APUs as well */
3402*b843c749SSergey Zigachev 	if (pci_is_root_bus(adev->pdev->bus)) {
3403*b843c749SSergey Zigachev 		if (adev->pm.pcie_gen_mask == 0)
3404*b843c749SSergey Zigachev 			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3405*b843c749SSergey Zigachev 		if (adev->pm.pcie_mlw_mask == 0)
3406*b843c749SSergey Zigachev 			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
3407*b843c749SSergey Zigachev 		return;
3408*b843c749SSergey Zigachev 	}
3409*b843c749SSergey Zigachev 
3410*b843c749SSergey Zigachev 	if (adev->pm.pcie_gen_mask == 0) {
3411*b843c749SSergey Zigachev 		/* asic caps */
3412*b843c749SSergey Zigachev 		pdev = adev->pdev;
3413*b843c749SSergey Zigachev 		speed_cap = pcie_get_speed_cap(pdev);
3414*b843c749SSergey Zigachev 		if (speed_cap == PCI_SPEED_UNKNOWN) {
3415*b843c749SSergey Zigachev 			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3416*b843c749SSergey Zigachev 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3417*b843c749SSergey Zigachev 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
3418*b843c749SSergey Zigachev 		} else {
3419*b843c749SSergey Zigachev 			if (speed_cap == PCIE_SPEED_16_0GT)
3420*b843c749SSergey Zigachev 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3421*b843c749SSergey Zigachev 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3422*b843c749SSergey Zigachev 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
3423*b843c749SSergey Zigachev 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
3424*b843c749SSergey Zigachev 			else if (speed_cap == PCIE_SPEED_8_0GT)
3425*b843c749SSergey Zigachev 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3426*b843c749SSergey Zigachev 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3427*b843c749SSergey Zigachev 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
3428*b843c749SSergey Zigachev 			else if (speed_cap == PCIE_SPEED_5_0GT)
3429*b843c749SSergey Zigachev 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3430*b843c749SSergey Zigachev 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
3431*b843c749SSergey Zigachev 			else
3432*b843c749SSergey Zigachev 				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
3433*b843c749SSergey Zigachev 		}
3434*b843c749SSergey Zigachev 		/* platform caps */
3435*b843c749SSergey Zigachev 		pdev = adev->ddev->pdev->bus->self;
3436*b843c749SSergey Zigachev 		speed_cap = pcie_get_speed_cap(pdev);
3437*b843c749SSergey Zigachev 		if (speed_cap == PCI_SPEED_UNKNOWN) {
3438*b843c749SSergey Zigachev 			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3439*b843c749SSergey Zigachev 						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
3440*b843c749SSergey Zigachev 		} else {
3441*b843c749SSergey Zigachev 			if (speed_cap == PCIE_SPEED_16_0GT)
3442*b843c749SSergey Zigachev 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3443*b843c749SSergey Zigachev 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3444*b843c749SSergey Zigachev 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
3445*b843c749SSergey Zigachev 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
3446*b843c749SSergey Zigachev 			else if (speed_cap == PCIE_SPEED_8_0GT)
3447*b843c749SSergey Zigachev 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3448*b843c749SSergey Zigachev 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3449*b843c749SSergey Zigachev 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
3450*b843c749SSergey Zigachev 			else if (speed_cap == PCIE_SPEED_5_0GT)
3451*b843c749SSergey Zigachev 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3452*b843c749SSergey Zigachev 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
3453*b843c749SSergey Zigachev 			else
3454*b843c749SSergey Zigachev 				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
3455*b843c749SSergey Zigachev 
3456*b843c749SSergey Zigachev 		}
3457*b843c749SSergey Zigachev 	}
3458*b843c749SSergey Zigachev 	if (adev->pm.pcie_mlw_mask == 0) {
3459*b843c749SSergey Zigachev 		pdev = adev->ddev->pdev->bus->self;
3460*b843c749SSergey Zigachev 		link_width = pcie_get_width_cap(pdev);
3461*b843c749SSergey Zigachev 		if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
3462*b843c749SSergey Zigachev 			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
3463*b843c749SSergey Zigachev 		} else {
3464*b843c749SSergey Zigachev 			switch (link_width) {
3465*b843c749SSergey Zigachev 			case PCIE_LNK_X32:
3466*b843c749SSergey Zigachev 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
3467*b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3468*b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3469*b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3470*b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3471*b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3472*b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3473*b843c749SSergey Zigachev 				break;
3474*b843c749SSergey Zigachev 			case PCIE_LNK_X16:
3475*b843c749SSergey Zigachev 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3476*b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3477*b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3478*b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3479*b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3480*b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3481*b843c749SSergey Zigachev 				break;
3482*b843c749SSergey Zigachev 			case PCIE_LNK_X12:
3483*b843c749SSergey Zigachev 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3484*b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3485*b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3486*b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3487*b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3488*b843c749SSergey Zigachev 				break;
3489*b843c749SSergey Zigachev 			case PCIE_LNK_X8:
3490*b843c749SSergey Zigachev 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3491*b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3492*b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3493*b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3494*b843c749SSergey Zigachev 				break;
3495*b843c749SSergey Zigachev 			case PCIE_LNK_X4:
3496*b843c749SSergey Zigachev 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3497*b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3498*b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3499*b843c749SSergey Zigachev 				break;
3500*b843c749SSergey Zigachev 			case PCIE_LNK_X2:
3501*b843c749SSergey Zigachev 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3502*b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3503*b843c749SSergey Zigachev 				break;
3504*b843c749SSergey Zigachev 			case PCIE_LNK_X1:
3505*b843c749SSergey Zigachev 				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
3506*b843c749SSergey Zigachev 				break;
3507*b843c749SSergey Zigachev 			default:
3508*b843c749SSergey Zigachev 				break;
3509*b843c749SSergey Zigachev 			}
3510*b843c749SSergey Zigachev 		}
3511*b843c749SSergey Zigachev 	}
3512*b843c749SSergey Zigachev }
3513*b843c749SSergey Zigachev 
3514