xref: /dflybsd-src/sys/dev/drm/amd/amdgpu/amdgpu_device.c (revision 809f38025e6f424cb8960d509d59de3ddc7d6b98)
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

MODULE_FIRMWARE("amdgpufw_vega10_gpu_info");
MODULE_FIRMWARE("amdgpufw_vega12_gpu_info");
MODULE_FIRMWARE("amdgpufw_raven_gpu_info");

#define AMDGPU_RESUME_MS		2000

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGAM",
	"VEGA10",
	"VEGA12",
	"VEGA20",
	"RAVEN",
	"LAST",
};

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * amdgpu_device_is_px - Is the device a dGPU with HG/PX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with HG/PX power control,
 * otherwise return false.
 */
bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
/**
 * amdgpu_mm_rreg - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_rreg(adev, reg);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

/*
 * MMIO register read with bytes helper functions
 * @offset: byte offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
	if (offset < adev->rmmio_size)
		return (readb(adev->rmmio + offset));
	BUG();
}

/*
 * MMIO register write with bytes helper functions
 * @offset: byte offset from MMIO start
 * @value: the value to be written to the register
 */
/**
 * amdgpu_mm_wreg8 - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) {
	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}

/**
 * amdgpu_mm_wreg - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_wreg(adev, reg, v);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

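/*
 * Illustrative sketch (not part of the driver): a typical read-modify-write
 * through the helpers above.  Callers normally go through the RREG32()/WREG32()
 * macros from amdgpu.h, which expand to amdgpu_mm_rreg()/amdgpu_mm_wreg() with
 * acc_flags == 0.  The register offset and bit below are made up for the
 * example.
 */
#if 0
static void amdgpu_example_rmw(struct amdgpu_device *adev)
{
	uint32_t tmp;

	tmp = amdgpu_mm_rreg(adev, 0x1234, 0);	/* hypothetical dword offset */
	tmp |= (1 << 0);			/* set an example enable bit */
	amdgpu_mm_wreg(adev, 0x1234, tmp, 0);
}
#endif
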
/**
 * amdgpu_io_rreg - read an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 *
 * Returns the 32 bit value from the offset specified.
 */
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return bus_read_4(adev->rio_mem, (reg * 4));
	else {
		bus_write_4(adev->rio_mem, (mmMM_INDEX * 4), (reg * 4));
		return bus_read_4(adev->rio_mem, (mmMM_DATA * 4));
	}
}

/**
 * amdgpu_io_wreg - write to an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if ((reg * 4) < adev->rio_mem_size)
		bus_write_4(adev->rio_mem, (reg * 4), v);
	else {
		bus_write_4(adev->rio_mem, (mmMM_INDEX * 4), (reg * 4));
		bus_write_4(adev->rio_mem, (mmMM_DATA * 4), v);
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

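/*
 * Illustrative sketch (not part of the driver): how a ring would typically
 * commit its write pointer through the doorbell helpers above.  The doorbell
 * index is hypothetical; real rings use the index assigned to them during
 * ring setup.
 */
#if 0
static void amdgpu_example_commit_wptr(struct amdgpu_device *adev,
				       u32 doorbell_index, u64 wptr)
{
	/* CIK-style rings use a 32-bit doorbell write ... */
	amdgpu_mm_wdoorbell(adev, doorbell_index, lower_32_bits(wptr));

	/* ... while VEGA10 and later rings write the full 64-bit pointer. */
	amdgpu_mm_wdoorbell64(adev, doorbell_index, wptr);
}
#endif
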
/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

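/*
 * Illustrative sketch (not part of the driver): the register array consumed
 * above is a flat list of (offset, and_mask, or_mask) triplets, which is how
 * the per-ASIC golden-setting tables are laid out.  The offsets and masks
 * below are made up for the example; an and_mask of 0xffffffff means the
 * or_mask is written as-is, anything else is a read-modify-write.
 */
#if 0
static const u32 example_golden_settings[] = {
	/* offset, and_mask,   or_mask */
	0x1234,    0xffffffff, 0x00000001,	/* full overwrite */
	0x5678,    0x0000ff00, 0x00003400,	/* clear bits 8-15, then OR */
};

static void amdgpu_example_apply_golden(struct amdgpu_device *adev)
{
	amdgpu_device_program_register_sequence(adev, example_golden_settings,
						ARRAY_SIZE(example_golden_settings));
}
#endif
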
/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}



/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      (u64 *)&adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, (u64 *)&adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}

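/*
 * Illustrative sketch (not part of the driver): typical lifetime of a
 * writeback slot.  The owner allocates a slot, hands the GPU address to the
 * hardware so it can write status there, reads the value back through the
 * CPU mapping, and frees the slot when done.  The variable names are
 * hypothetical.
 */
#if 0
static int amdgpu_example_use_wb(struct amdgpu_device *adev)
{
	u32 wb;		/* dword index returned by amdgpu_device_wb_get() */
	u64 gpu_addr;
	u32 status;
	int r;

	r = amdgpu_device_wb_get(adev, &wb);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (wb * 4);	/* address handed to the GPU */
	status = le32_to_cpu(adev->wb.wb[wb]);		/* value the GPU wrote back */
	DRM_DEBUG("wb slot %u at 0x%llx, value 0x%08x\n", wb, gpu_addr, status);

	amdgpu_device_wb_free(adev, wb);
	return 0;
}
#endif
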
/**
 * amdgpu_device_vram_location - try to find VRAM location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter.
 */
void amdgpu_device_vram_location(struct amdgpu_device *adev,
				 struct amdgpu_gmc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_device_gart_location - try to find GART location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GART before or after VRAM.
 *
 * If the GART size is bigger than the space left, then we adjust the GART size.
 * Thus this function will never fail.
 */
void amdgpu_device_gart_location(struct amdgpu_device *adev,
				 struct amdgpu_gmc *mc)
{
	u64 size_af, size_bf;

	mc->gart_size += adev->pm.smu_prv_buffer_size;

	size_af = adev->gmc.mc_mask - mc->vram_end;
	size_bf = mc->vram_start;
	if (size_bf > size_af) {
		if (mc->gart_size > size_bf) {
			dev_warn(adev->dev, "limiting GART\n");
			mc->gart_size = size_bf;
		}
		mc->gart_start = 0;
	} else {
		if (mc->gart_size > size_af) {
			dev_warn(adev->dev, "limiting GART\n");
			mc->gart_size = size_af;
		}
		/* VCE doesn't like it when BOs cross a 4GB segment, so align
		 * the GART base on a 4GB boundary as well.
		 */
		mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
	}
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
			mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}

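/*
 * Illustrative worked example (not part of the driver): with 8 GB of VRAM
 * placed at base 0 and a 512 MB GART, the code above yields
 *
 *   vram_start = 0x0000000000000000, vram_end = 0x00000001FFFFFFFF
 *   size_bf    = 0 (no room below VRAM), so the GART goes above it:
 *   gart_start = ALIGN(vram_end + 1, 4 GB) = 0x0000000200000000
 *   gart_end   = gart_start + 512 MB - 1  = 0x000000021FFFFFFF
 *
 * i.e. the GART window begins on the next 4 GB boundary after VRAM so that
 * VCE buffer objects never straddle a 4 GB segment.
 */
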
/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	kprintf("amdgpu_device_resize_fb_bar: not implemented\n");
	return 0;
#if 0
	u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
	struct pci_bus *root;
	struct resource *res;
	unsigned i;
	u16 cmd;
	int r;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
#endif
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw needs post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or if post is needed when a hw reset is performed.
 * Returns true if needed or false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In the whole-GPU pass-through virtualization case, after a VM
		 * reboot some old SMC firmware still needs the driver to do a vPost,
		 * otherwise the GPU hangs.  SMC firmware versions above 22.15 don't have
		 * this flaw, so we force vPost to be executed for SMC versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpufw_fiji_smc", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

#if 0
/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;
	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
#endif

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines number of bits in page table versus page directory,
 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 * page table and the remaining bits are in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}

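/*
 * Illustrative worked example (not part of the driver): with 12 bits of page
 * offset (4 KB pages) and amdgpu_vm_block_size = 9, one page-table block
 * covers 2^(12 + 9) bytes = 2 MB of GPU virtual address space, and the
 * remaining address bits are resolved through the page-directory levels.
 * Values below 9 are rejected above because, per the comment, a page table
 * needs at least 9 bits (512 entries).
 */
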
/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
	struct sysinfo si;
	bool is_os_64 = (sizeof(void *) == 8) ? true : false;
	uint64_t total_memory;
	uint64_t dram_size_seven_GB = 0x1B8000000;
	uint64_t dram_size_three_GB = 0xB8000000;

	if (amdgpu_smu_memory_pool_size == 0)
		return;

	if (!is_os_64) {
		DRM_WARN("Not 64-bit OS, feature not supported\n");
		goto def_value;
	}
	si_meminfo(&si);
	total_memory = (uint64_t)si.totalram * si.mem_unit;

	if ((amdgpu_smu_memory_pool_size == 1) ||
		(amdgpu_smu_memory_pool_size == 2)) {
		if (total_memory < dram_size_three_GB)
			goto def_value1;
	} else if ((amdgpu_smu_memory_pool_size == 4) ||
		(amdgpu_smu_memory_pool_size == 8)) {
		if (total_memory < dram_size_seven_GB)
			goto def_value1;
	} else {
		DRM_WARN("Smu memory pool size not supported\n");
		goto def_value;
	}
	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

	return;

def_value1:
	DRM_WARN("Not enough system memory\n");
def_value:
	adev->pm.smu_prv_buffer_size = 0;
}

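/*
 * Illustrative worked example (not part of the driver): the pool size module
 * parameter is effectively in units of 256 MB, since the buffer size is
 * amdgpu_smu_memory_pool_size << 28.  A value of 1 therefore becomes 256 MB
 * and a value of 8 becomes 2 GB; per the checks above, the 1/2 settings
 * require roughly 3 GB of system RAM and the 4/8 settings roughly 7 GB.
 */
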
958b843c749SSergey Zigachev /**
959b843c749SSergey Zigachev  * amdgpu_device_check_arguments - validate module params
960b843c749SSergey Zigachev  *
961b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
962b843c749SSergey Zigachev  *
963b843c749SSergey Zigachev  * Validates certain module parameters and updates
964b843c749SSergey Zigachev  * the associated values used by the driver (all asics).
965b843c749SSergey Zigachev  */
amdgpu_device_check_arguments(struct amdgpu_device * adev)966b843c749SSergey Zigachev static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
967b843c749SSergey Zigachev {
968b843c749SSergey Zigachev 	if (amdgpu_sched_jobs < 4) {
969b843c749SSergey Zigachev 		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
970b843c749SSergey Zigachev 			 amdgpu_sched_jobs);
971b843c749SSergey Zigachev 		amdgpu_sched_jobs = 4;
972b843c749SSergey Zigachev 	} else if (!is_power_of_2(amdgpu_sched_jobs)){
973b843c749SSergey Zigachev 		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
974b843c749SSergey Zigachev 			 amdgpu_sched_jobs);
975b843c749SSergey Zigachev 		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
976b843c749SSergey Zigachev 	}
977b843c749SSergey Zigachev 
978b843c749SSergey Zigachev 	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
979b843c749SSergey Zigachev 		/* gart size must be greater or equal to 32M */
980b843c749SSergey Zigachev 		dev_warn(adev->dev, "gart size (%d) too small\n",
981b843c749SSergey Zigachev 			 amdgpu_gart_size);
982b843c749SSergey Zigachev 		amdgpu_gart_size = -1;
983b843c749SSergey Zigachev 	}
984b843c749SSergey Zigachev 
985b843c749SSergey Zigachev 	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
986b843c749SSergey Zigachev 		/* gtt size must be greater or equal to 32M */
987b843c749SSergey Zigachev 		dev_warn(adev->dev, "gtt size (%d) too small\n",
988b843c749SSergey Zigachev 				 amdgpu_gtt_size);
989b843c749SSergey Zigachev 		amdgpu_gtt_size = -1;
990b843c749SSergey Zigachev 	}
991b843c749SSergey Zigachev 
992b843c749SSergey Zigachev 	/* valid range is between 4 and 9 inclusive */
993b843c749SSergey Zigachev 	if (amdgpu_vm_fragment_size != -1 &&
994b843c749SSergey Zigachev 	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
995b843c749SSergey Zigachev 		dev_warn(adev->dev, "valid range is between 4 and 9\n");
996b843c749SSergey Zigachev 		amdgpu_vm_fragment_size = -1;
997b843c749SSergey Zigachev 	}
998b843c749SSergey Zigachev 
999b843c749SSergey Zigachev 	amdgpu_device_check_smu_prv_buffer_size(adev);
1000b843c749SSergey Zigachev 
1001b843c749SSergey Zigachev 	amdgpu_device_check_vm_size(adev);
1002b843c749SSergey Zigachev 
1003b843c749SSergey Zigachev 	amdgpu_device_check_block_size(adev);
1004b843c749SSergey Zigachev 
1005b843c749SSergey Zigachev 	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
1006b843c749SSergey Zigachev 	    !is_power_of_2(amdgpu_vram_page_split))) {
1007b843c749SSergey Zigachev 		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
1008b843c749SSergey Zigachev 			 amdgpu_vram_page_split);
1009b843c749SSergey Zigachev 		amdgpu_vram_page_split = 1024;
1010b843c749SSergey Zigachev 	}
1011b843c749SSergey Zigachev 
1012b843c749SSergey Zigachev 	if (amdgpu_lockup_timeout == 0) {
1013b843c749SSergey Zigachev 		dev_warn(adev->dev, "lockup_timeout must be > 0, adjusting to 10000\n");
1014b843c749SSergey Zigachev 		amdgpu_lockup_timeout = 10000;
1015b843c749SSergey Zigachev 	}
1016b843c749SSergey Zigachev 
1017b843c749SSergey Zigachev 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1018b843c749SSergey Zigachev }
1019b843c749SSergey Zigachev 
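/*
 * Illustrative sketch (not part of the driver) of the amdgpu_sched_jobs
 * clamp applied above: values below 4 are raised to 4 and non-powers of
 * two are rounded up, so 6 becomes 8 and 9 becomes 16.  The helper name
 * is hypothetical.
 */
static inline int example_clamp_sched_jobs(int jobs)
{
	if (jobs < 4)
		return 4;			/* enforce the minimum of 4 */
	if (!is_power_of_2(jobs))
		return roundup_pow_of_two(jobs);/* e.g. 6 -> 8, 9 -> 16 */
	return jobs;				/* already valid */
}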
1020b843c749SSergey Zigachev /**
1021b843c749SSergey Zigachev  * amdgpu_switcheroo_set_state - set switcheroo state
1022b843c749SSergey Zigachev  *
1023b843c749SSergey Zigachev  * @pdev: pci dev pointer
1024b843c749SSergey Zigachev  * @state: vga_switcheroo state
1025b843c749SSergey Zigachev  *
1026b843c749SSergey Zigachev  * Callback for the switcheroo driver.  Suspends or resumes the
1027b843c749SSergey Zigachev  * asics before or after they are powered up using ACPI methods.
1028b843c749SSergey Zigachev  */
102978973132SSergey Zigachev #if 0
1030b843c749SSergey Zigachev static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1031b843c749SSergey Zigachev {
1032b843c749SSergey Zigachev 	struct drm_device *dev = pci_get_drvdata(pdev);
1033b843c749SSergey Zigachev 
1034b843c749SSergey Zigachev 	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1035b843c749SSergey Zigachev 		return;
1036b843c749SSergey Zigachev 
1037b843c749SSergey Zigachev 	if (state == VGA_SWITCHEROO_ON) {
1038b843c749SSergey Zigachev 		pr_info("amdgpu: switched on\n");
1039b843c749SSergey Zigachev 		/* don't suspend or resume card normally */
1040b843c749SSergey Zigachev 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1041b843c749SSergey Zigachev 
1042b843c749SSergey Zigachev 		amdgpu_device_resume(dev, true, true);
1043b843c749SSergey Zigachev 
1044b843c749SSergey Zigachev 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1045b843c749SSergey Zigachev 		drm_kms_helper_poll_enable(dev);
1046b843c749SSergey Zigachev 	} else {
1047b843c749SSergey Zigachev 		pr_info("amdgpu: switched off\n");
1048b843c749SSergey Zigachev 		drm_kms_helper_poll_disable(dev);
1049b843c749SSergey Zigachev 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1050b843c749SSergey Zigachev 		amdgpu_device_suspend(dev, true, true);
1051b843c749SSergey Zigachev 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1052b843c749SSergey Zigachev 	}
1053b843c749SSergey Zigachev }
1054b843c749SSergey Zigachev 
1055b843c749SSergey Zigachev /**
1056b843c749SSergey Zigachev  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1057b843c749SSergey Zigachev  *
1058b843c749SSergey Zigachev  * @pdev: pci dev pointer
1059b843c749SSergey Zigachev  *
1060b843c749SSergey Zigachev  * Callback for the switcheroo driver.  Checks if the switcheroo
1061b843c749SSergey Zigachev  * state can be changed.
1062b843c749SSergey Zigachev  * Returns true if the state can be changed, false if not.
1063b843c749SSergey Zigachev  */
1064b843c749SSergey Zigachev static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1065b843c749SSergey Zigachev {
1066b843c749SSergey Zigachev 	struct drm_device *dev = pci_get_drvdata(pdev);
1067b843c749SSergey Zigachev 
1068b843c749SSergey Zigachev 	/*
1069b843c749SSergey Zigachev 	* FIXME: open_count is protected by drm_global_mutex but that would lead to
1070b843c749SSergey Zigachev 	* locking inversion with the driver load path. And the access here is
1071b843c749SSergey Zigachev 	* completely racy anyway. So don't bother with locking for now.
1072b843c749SSergey Zigachev 	*/
1073b843c749SSergey Zigachev 	return dev->open_count == 0;
1074b843c749SSergey Zigachev }
1075b843c749SSergey Zigachev 
1076b843c749SSergey Zigachev static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1077b843c749SSergey Zigachev 	.set_gpu_state = amdgpu_switcheroo_set_state,
1078b843c749SSergey Zigachev 	.reprobe = NULL,
1079b843c749SSergey Zigachev 	.can_switch = amdgpu_switcheroo_can_switch,
1080b843c749SSergey Zigachev };
108178973132SSergey Zigachev #endif
1082b843c749SSergey Zigachev 
1083b843c749SSergey Zigachev /**
1084b843c749SSergey Zigachev  * amdgpu_device_ip_set_clockgating_state - set the CG state
1085b843c749SSergey Zigachev  *
1086b843c749SSergey Zigachev  * @dev: amdgpu_device pointer
1087b843c749SSergey Zigachev  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1088b843c749SSergey Zigachev  * @state: clockgating state (gate or ungate)
1089b843c749SSergey Zigachev  *
1090b843c749SSergey Zigachev  * Sets the requested clockgating state for all instances of
1091b843c749SSergey Zigachev  * the hardware IP specified.
1092b843c749SSergey Zigachev  * Returns the error code from the last instance.
1093b843c749SSergey Zigachev  */
1094b843c749SSergey Zigachev int amdgpu_device_ip_set_clockgating_state(void *dev,
1095b843c749SSergey Zigachev 					   enum amd_ip_block_type block_type,
1096b843c749SSergey Zigachev 					   enum amd_clockgating_state state)
1097b843c749SSergey Zigachev {
1098b843c749SSergey Zigachev 	struct amdgpu_device *adev = dev;
1099b843c749SSergey Zigachev 	int i, r = 0;
1100b843c749SSergey Zigachev 
1101b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
1102b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
1103b843c749SSergey Zigachev 			continue;
1104b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type != block_type)
1105b843c749SSergey Zigachev 			continue;
1106b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1107b843c749SSergey Zigachev 			continue;
1108b843c749SSergey Zigachev 		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1109b843c749SSergey Zigachev 			(void *)adev, state);
1110b843c749SSergey Zigachev 		if (r)
1111b843c749SSergey Zigachev 			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1112b843c749SSergey Zigachev 				  adev->ip_blocks[i].version->funcs->name, r);
1113b843c749SSergey Zigachev 	}
1114b843c749SSergey Zigachev 	return r;
1115b843c749SSergey Zigachev }
1116b843c749SSergey Zigachev 
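/*
 * Illustrative (hypothetical) caller sketch for the helper above: request
 * clock gating for every GFX IP instance and report a failure.  The
 * function name is an example only.
 */
static inline int example_gate_gfx_clocks(struct amdgpu_device *adev)
{
	int r;

	/* walks all GFX instances; returns the error of the last one */
	r = amdgpu_device_ip_set_clockgating_state(adev,
						   AMD_IP_BLOCK_TYPE_GFX,
						   AMD_CG_STATE_GATE);
	if (r)
		DRM_ERROR("failed to gate GFX clocks (%d)\n", r);
	return r;
}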
1117b843c749SSergey Zigachev /**
1118b843c749SSergey Zigachev  * amdgpu_device_ip_set_powergating_state - set the PG state
1119b843c749SSergey Zigachev  *
1120b843c749SSergey Zigachev  * @dev: amdgpu_device pointer
1121b843c749SSergey Zigachev  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1122b843c749SSergey Zigachev  * @state: powergating state (gate or ungate)
1123b843c749SSergey Zigachev  *
1124b843c749SSergey Zigachev  * Sets the requested powergating state for all instances of
1125b843c749SSergey Zigachev  * the hardware IP specified.
1126b843c749SSergey Zigachev  * Returns the error code from the last instance.
1127b843c749SSergey Zigachev  */
1128b843c749SSergey Zigachev int amdgpu_device_ip_set_powergating_state(void *dev,
1129b843c749SSergey Zigachev 					   enum amd_ip_block_type block_type,
1130b843c749SSergey Zigachev 					   enum amd_powergating_state state)
1131b843c749SSergey Zigachev {
1132b843c749SSergey Zigachev 	struct amdgpu_device *adev = dev;
1133b843c749SSergey Zigachev 	int i, r = 0;
1134b843c749SSergey Zigachev 
1135b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
1136b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
1137b843c749SSergey Zigachev 			continue;
1138b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type != block_type)
1139b843c749SSergey Zigachev 			continue;
1140b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1141b843c749SSergey Zigachev 			continue;
1142b843c749SSergey Zigachev 		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1143b843c749SSergey Zigachev 			(void *)adev, state);
1144b843c749SSergey Zigachev 		if (r)
1145b843c749SSergey Zigachev 			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1146b843c749SSergey Zigachev 				  adev->ip_blocks[i].version->funcs->name, r);
1147b843c749SSergey Zigachev 	}
1148b843c749SSergey Zigachev 	return r;
1149b843c749SSergey Zigachev }
1150b843c749SSergey Zigachev 
1151b843c749SSergey Zigachev /**
1152b843c749SSergey Zigachev  * amdgpu_device_ip_get_clockgating_state - get the CG state
1153b843c749SSergey Zigachev  *
1154b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1155b843c749SSergey Zigachev  * @flags: clockgating feature flags
1156b843c749SSergey Zigachev  *
1157b843c749SSergey Zigachev  * Walks the list of IPs on the device and updates the clockgating
1158b843c749SSergey Zigachev  * flags for each IP.
1159b843c749SSergey Zigachev  * Updates @flags with the feature flags for each hardware IP where
1160b843c749SSergey Zigachev  * clockgating is enabled.
1161b843c749SSergey Zigachev  */
1162b843c749SSergey Zigachev void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1163b843c749SSergey Zigachev 					    u32 *flags)
1164b843c749SSergey Zigachev {
1165b843c749SSergey Zigachev 	int i;
1166b843c749SSergey Zigachev 
1167b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
1168b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
1169b843c749SSergey Zigachev 			continue;
1170b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1171b843c749SSergey Zigachev 			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1172b843c749SSergey Zigachev 	}
1173b843c749SSergey Zigachev }
1174b843c749SSergey Zigachev 
1175b843c749SSergey Zigachev /**
1176b843c749SSergey Zigachev  * amdgpu_device_ip_wait_for_idle - wait for idle
1177b843c749SSergey Zigachev  *
1178b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1179b843c749SSergey Zigachev  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1180b843c749SSergey Zigachev  *
1181b843c749SSergey Zigachev  * Waits for the requested hardware IP to be idle.
1182b843c749SSergey Zigachev  * Returns 0 for success or a negative error code on failure.
1183b843c749SSergey Zigachev  */
1184b843c749SSergey Zigachev int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1185b843c749SSergey Zigachev 				   enum amd_ip_block_type block_type)
1186b843c749SSergey Zigachev {
1187b843c749SSergey Zigachev 	int i, r;
1188b843c749SSergey Zigachev 
1189b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
1190b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
1191b843c749SSergey Zigachev 			continue;
1192b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type == block_type) {
1193b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1194b843c749SSergey Zigachev 			if (r)
1195b843c749SSergey Zigachev 				return r;
1196b843c749SSergey Zigachev 			break;
1197b843c749SSergey Zigachev 		}
1198b843c749SSergey Zigachev 	}
1199b843c749SSergey Zigachev 	return 0;
1200b843c749SSergey Zigachev 
1201b843c749SSergey Zigachev }
1202b843c749SSergey Zigachev 
1203b843c749SSergey Zigachev /**
1204b843c749SSergey Zigachev  * amdgpu_device_ip_is_idle - is the hardware IP idle
1205b843c749SSergey Zigachev  *
1206b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1207b843c749SSergey Zigachev  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1208b843c749SSergey Zigachev  *
1209b843c749SSergey Zigachev  * Check if the hardware IP is idle or not.
1210b843c749SSergey Zigachev  * Returns true if the IP is idle, false if not.
1211b843c749SSergey Zigachev  */
1212b843c749SSergey Zigachev bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1213b843c749SSergey Zigachev 			      enum amd_ip_block_type block_type)
1214b843c749SSergey Zigachev {
1215b843c749SSergey Zigachev 	int i;
1216b843c749SSergey Zigachev 
1217b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
1218b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
1219b843c749SSergey Zigachev 			continue;
1220b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type == block_type)
1221b843c749SSergey Zigachev 			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1222b843c749SSergey Zigachev 	}
1223b843c749SSergey Zigachev 	return true;
1224b843c749SSergey Zigachev 
1225b843c749SSergey Zigachev }
1226b843c749SSergey Zigachev 
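/*
 * Illustrative (hypothetical) sketch combining the two helpers above:
 * skip the wait when the GFX IP already reports idle, otherwise block in
 * its wait_for_idle callback.
 */
static inline int example_quiesce_gfx(struct amdgpu_device *adev)
{
	if (amdgpu_device_ip_is_idle(adev, AMD_IP_BLOCK_TYPE_GFX))
		return 0;	/* already idle, nothing to wait for */

	return amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GFX);
}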
1227b843c749SSergey Zigachev /**
1228b843c749SSergey Zigachev  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1229b843c749SSergey Zigachev  *
1230b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1231b843c749SSergey Zigachev  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1232b843c749SSergey Zigachev  *
1233b843c749SSergey Zigachev  * Returns a pointer to the hardware IP block structure
1234b843c749SSergey Zigachev  * if it exists for the asic, otherwise NULL.
1235b843c749SSergey Zigachev  */
1236b843c749SSergey Zigachev struct amdgpu_ip_block *
1237b843c749SSergey Zigachev amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1238b843c749SSergey Zigachev 			      enum amd_ip_block_type type)
1239b843c749SSergey Zigachev {
1240b843c749SSergey Zigachev 	int i;
1241b843c749SSergey Zigachev 
1242b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++)
1243b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type == type)
1244b843c749SSergey Zigachev 			return &adev->ip_blocks[i];
1245b843c749SSergey Zigachev 
1246b843c749SSergey Zigachev 	return NULL;
1247b843c749SSergey Zigachev }
1248b843c749SSergey Zigachev 
1249b843c749SSergey Zigachev /**
1250b843c749SSergey Zigachev  * amdgpu_device_ip_block_version_cmp
1251b843c749SSergey Zigachev  *
1252b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1253b843c749SSergey Zigachev  * @type: enum amd_ip_block_type
1254b843c749SSergey Zigachev  * @major: major version
1255b843c749SSergey Zigachev  * @minor: minor version
1256b843c749SSergey Zigachev  *
1257b843c749SSergey Zigachev  * return 0 if equal or greater
1258b843c749SSergey Zigachev  * return 1 if smaller or the ip_block doesn't exist
1259b843c749SSergey Zigachev  */
1260b843c749SSergey Zigachev int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1261b843c749SSergey Zigachev 				       enum amd_ip_block_type type,
1262b843c749SSergey Zigachev 				       u32 major, u32 minor)
1263b843c749SSergey Zigachev {
1264b843c749SSergey Zigachev 	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1265b843c749SSergey Zigachev 
1266b843c749SSergey Zigachev 	if (ip_block && ((ip_block->version->major > major) ||
1267b843c749SSergey Zigachev 			((ip_block->version->major == major) &&
1268b843c749SSergey Zigachev 			(ip_block->version->minor >= minor))))
1269b843c749SSergey Zigachev 		return 0;
1270b843c749SSergey Zigachev 
1271b843c749SSergey Zigachev 	return 1;
1272b843c749SSergey Zigachev }
1273b843c749SSergey Zigachev 
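/*
 * Illustrative (hypothetical) sketch using the two lookups above: fetch
 * the GMC IP block and ask whether it is at least version 9.0.
 */
static inline bool example_gmc_is_v9_or_newer(struct amdgpu_device *adev)
{
	if (!amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC))
		return false;	/* no GMC block registered on this asic */

	/* cmp returns 0 when the block is >= the requested 9.0 */
	return amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GMC,
						  9, 0) == 0;
}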
1274b843c749SSergey Zigachev /**
1275b843c749SSergey Zigachev  * amdgpu_device_ip_block_add
1276b843c749SSergey Zigachev  *
1277b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1278b843c749SSergey Zigachev  * @ip_block_version: pointer to the IP to add
1279b843c749SSergey Zigachev  *
1280b843c749SSergey Zigachev  * Adds the IP block driver information to the collection of IPs
1281b843c749SSergey Zigachev  * on the asic.
1282b843c749SSergey Zigachev  */
1283b843c749SSergey Zigachev int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1284b843c749SSergey Zigachev 			       const struct amdgpu_ip_block_version *ip_block_version)
1285b843c749SSergey Zigachev {
1286b843c749SSergey Zigachev 	if (!ip_block_version)
1287b843c749SSergey Zigachev 		return -EINVAL;
1288b843c749SSergey Zigachev 
1289b843c749SSergey Zigachev 	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1290b843c749SSergey Zigachev 		  ip_block_version->funcs->name);
1291b843c749SSergey Zigachev 
1292b843c749SSergey Zigachev 	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1293b843c749SSergey Zigachev 
1294b843c749SSergey Zigachev 	return 0;
1295b843c749SSergey Zigachev }
1296b843c749SSergey Zigachev 
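/*
 * Illustrative sketch of how an asic setup routine registers its IP blocks
 * with the helper above.  "example_common_ip_block" is a hypothetical
 * amdgpu_ip_block_version used only for this example.
 */
extern const struct amdgpu_ip_block_version example_common_ip_block;

static inline int example_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_device_ip_block_add(adev, &example_common_ip_block);
	if (r)
		return r;
	/* further blocks (GMC, IH, GFX, SDMA, ...) are added the same way */
	return 0;
}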
1297b843c749SSergey Zigachev /**
1298b843c749SSergey Zigachev  * amdgpu_device_enable_virtual_display - enable virtual display feature
1299b843c749SSergey Zigachev  *
1300b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1301b843c749SSergey Zigachev  *
1302b843c749SSergey Zigachev  * Enables the virtual display feature if the user has enabled it via
1303b843c749SSergey Zigachev  * the module parameter virtual_display.  This feature provides a virtual
1304b843c749SSergey Zigachev  * display hardware on headless boards or in virtualized environments.
1305b843c749SSergey Zigachev  * This function parses and validates the configuration string specified by
1306b843c749SSergey Zigachev  * the user and configures the virtual display configuration (number of
1307b843c749SSergey Zigachev  * virtual connectors, crtcs, etc.) specified.
1308b843c749SSergey Zigachev  */
1309b843c749SSergey Zigachev static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1310b843c749SSergey Zigachev {
1311b843c749SSergey Zigachev 	adev->enable_virtual_display = false;
1312b843c749SSergey Zigachev 
1313b843c749SSergey Zigachev 	if (amdgpu_virtual_display) {
1314b843c749SSergey Zigachev 		struct drm_device *ddev = adev->ddev;
1315b843c749SSergey Zigachev 		const char *pci_address_name = pci_name(ddev->pdev);
1316b843c749SSergey Zigachev 		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1317b843c749SSergey Zigachev 
1318b843c749SSergey Zigachev 		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1319b843c749SSergey Zigachev 		pciaddstr_tmp = pciaddstr;
1320b843c749SSergey Zigachev 		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1321b843c749SSergey Zigachev 			pciaddname = strsep(&pciaddname_tmp, ",");
1322b843c749SSergey Zigachev 			if (!strcmp("all", pciaddname)
1323b843c749SSergey Zigachev 			    || !strcmp(pci_address_name, pciaddname)) {
1324b843c749SSergey Zigachev 				long num_crtc;
1325b843c749SSergey Zigachev 				int res = -1;
1326b843c749SSergey Zigachev 
1327b843c749SSergey Zigachev 				adev->enable_virtual_display = true;
1328b843c749SSergey Zigachev 
1329b843c749SSergey Zigachev 				if (pciaddname_tmp)
1330b843c749SSergey Zigachev 					res = kstrtol(pciaddname_tmp, 10,
1331b843c749SSergey Zigachev 						      &num_crtc);
1332b843c749SSergey Zigachev 
1333b843c749SSergey Zigachev 				if (!res) {
1334b843c749SSergey Zigachev 					if (num_crtc < 1)
1335b843c749SSergey Zigachev 						num_crtc = 1;
1336b843c749SSergey Zigachev 					if (num_crtc > 6)
1337b843c749SSergey Zigachev 						num_crtc = 6;
1338b843c749SSergey Zigachev 					adev->mode_info.num_crtc = num_crtc;
1339b843c749SSergey Zigachev 				} else {
1340b843c749SSergey Zigachev 					adev->mode_info.num_crtc = 1;
1341b843c749SSergey Zigachev 				}
1342b843c749SSergey Zigachev 				break;
1343b843c749SSergey Zigachev 			}
1344b843c749SSergey Zigachev 		}
1345b843c749SSergey Zigachev 
1346b843c749SSergey Zigachev 		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1347b843c749SSergey Zigachev 			 amdgpu_virtual_display, pci_address_name,
1348b843c749SSergey Zigachev 			 adev->enable_virtual_display, adev->mode_info.num_crtc);
1349b843c749SSergey Zigachev 
1350b843c749SSergey Zigachev 		kfree(pciaddstr);
1351b843c749SSergey Zigachev 	}
1352b843c749SSergey Zigachev }
1353b843c749SSergey Zigachev 
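/*
 * Example of the accepted virtual_display format, derived from the parsing
 * above: a semicolon-separated list of "<pci address>[,<num_crtc>]" entries,
 * where the address may also be the literal "all" and num_crtc is clamped
 * to the range 1..6.  The PCI address below is a placeholder:
 *
 *	amdgpu.virtual_display=0000:26:00.0,2	(two virtual crtcs on one device)
 *	amdgpu.virtual_display=all,1		(one virtual crtc on every device)
 */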
1354b843c749SSergey Zigachev /**
1355b843c749SSergey Zigachev  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1356b843c749SSergey Zigachev  *
1357b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1358b843c749SSergey Zigachev  *
1359b843c749SSergey Zigachev  * Parses the asic configuration parameters specified in the gpu info
1360b843c749SSergey Zigachev  * firmware and makes them available to the driver for use in configuring
1361b843c749SSergey Zigachev  * the asic.
1362b843c749SSergey Zigachev  * Returns 0 on success, -EINVAL on failure.
1363b843c749SSergey Zigachev  */
1364b843c749SSergey Zigachev static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1365b843c749SSergey Zigachev {
1366b843c749SSergey Zigachev 	const char *chip_name;
1367b843c749SSergey Zigachev 	char fw_name[30];
1368b843c749SSergey Zigachev 	int err;
1369b843c749SSergey Zigachev 	const struct gpu_info_firmware_header_v1_0 *hdr;
1370b843c749SSergey Zigachev 
1371b843c749SSergey Zigachev 	adev->firmware.gpu_info_fw = NULL;
1372b843c749SSergey Zigachev 
1373b843c749SSergey Zigachev 	switch (adev->asic_type) {
1374b843c749SSergey Zigachev 	case CHIP_TOPAZ:
1375b843c749SSergey Zigachev 	case CHIP_TONGA:
1376b843c749SSergey Zigachev 	case CHIP_FIJI:
1377b843c749SSergey Zigachev 	case CHIP_POLARIS10:
1378b843c749SSergey Zigachev 	case CHIP_POLARIS11:
1379b843c749SSergey Zigachev 	case CHIP_POLARIS12:
1380b843c749SSergey Zigachev 	case CHIP_VEGAM:
1381b843c749SSergey Zigachev 	case CHIP_CARRIZO:
1382b843c749SSergey Zigachev 	case CHIP_STONEY:
1383b843c749SSergey Zigachev #ifdef CONFIG_DRM_AMDGPU_SI
1384b843c749SSergey Zigachev 	case CHIP_VERDE:
1385b843c749SSergey Zigachev 	case CHIP_TAHITI:
1386b843c749SSergey Zigachev 	case CHIP_PITCAIRN:
1387b843c749SSergey Zigachev 	case CHIP_OLAND:
1388b843c749SSergey Zigachev 	case CHIP_HAINAN:
1389b843c749SSergey Zigachev #endif
1390b843c749SSergey Zigachev #ifdef CONFIG_DRM_AMDGPU_CIK
1391b843c749SSergey Zigachev 	case CHIP_BONAIRE:
1392b843c749SSergey Zigachev 	case CHIP_HAWAII:
1393b843c749SSergey Zigachev 	case CHIP_KAVERI:
1394b843c749SSergey Zigachev 	case CHIP_KABINI:
1395b843c749SSergey Zigachev 	case CHIP_MULLINS:
1396b843c749SSergey Zigachev #endif
1397b843c749SSergey Zigachev 	case CHIP_VEGA20:
1398b843c749SSergey Zigachev 	default:
1399b843c749SSergey Zigachev 		return 0;
1400b843c749SSergey Zigachev 	case CHIP_VEGA10:
1401b843c749SSergey Zigachev 		chip_name = "vega10";
1402b843c749SSergey Zigachev 		break;
1403b843c749SSergey Zigachev 	case CHIP_VEGA12:
1404b843c749SSergey Zigachev 		chip_name = "vega12";
1405b843c749SSergey Zigachev 		break;
1406b843c749SSergey Zigachev 	case CHIP_RAVEN:
1407b843c749SSergey Zigachev 		chip_name = "raven";
1408b843c749SSergey Zigachev 		break;
1409b843c749SSergey Zigachev 	}
1410b843c749SSergey Zigachev 
1411*809f3802SSergey Zigachev 	snprintf(fw_name, sizeof(fw_name), "amdgpufw_%s_gpu_info", chip_name);
1412b843c749SSergey Zigachev 	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1413b843c749SSergey Zigachev 	if (err) {
1414b843c749SSergey Zigachev 		dev_err(adev->dev,
1415b843c749SSergey Zigachev 			"Failed to load gpu_info firmware \"%s\"\n",
1416b843c749SSergey Zigachev 			fw_name);
1417b843c749SSergey Zigachev 		goto out;
1418b843c749SSergey Zigachev 	}
1419b843c749SSergey Zigachev 	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1420b843c749SSergey Zigachev 	if (err) {
1421b843c749SSergey Zigachev 		dev_err(adev->dev,
1422b843c749SSergey Zigachev 			"Failed to validate gpu_info firmware \"%s\"\n",
1423b843c749SSergey Zigachev 			fw_name);
1424b843c749SSergey Zigachev 		goto out;
1425b843c749SSergey Zigachev 	}
1426b843c749SSergey Zigachev 
1427b843c749SSergey Zigachev 	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1428b843c749SSergey Zigachev 	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1429b843c749SSergey Zigachev 
1430b843c749SSergey Zigachev 	switch (hdr->version_major) {
1431b843c749SSergey Zigachev 	case 1:
1432b843c749SSergey Zigachev 	{
1433b843c749SSergey Zigachev 		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1434b843c749SSergey Zigachev 			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1435b843c749SSergey Zigachev 								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1436b843c749SSergey Zigachev 
1437b843c749SSergey Zigachev 		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1438b843c749SSergey Zigachev 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1439b843c749SSergey Zigachev 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1440b843c749SSergey Zigachev 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1441b843c749SSergey Zigachev 		adev->gfx.config.max_texture_channel_caches =
1442b843c749SSergey Zigachev 			le32_to_cpu(gpu_info_fw->gc_num_tccs);
1443b843c749SSergey Zigachev 		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1444b843c749SSergey Zigachev 		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1445b843c749SSergey Zigachev 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1446b843c749SSergey Zigachev 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1447b843c749SSergey Zigachev 		adev->gfx.config.double_offchip_lds_buf =
1448b843c749SSergey Zigachev 			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1449b843c749SSergey Zigachev 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1450b843c749SSergey Zigachev 		adev->gfx.cu_info.max_waves_per_simd =
1451b843c749SSergey Zigachev 			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1452b843c749SSergey Zigachev 		adev->gfx.cu_info.max_scratch_slots_per_cu =
1453b843c749SSergey Zigachev 			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1454b843c749SSergey Zigachev 		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1455b843c749SSergey Zigachev 		break;
1456b843c749SSergey Zigachev 	}
1457b843c749SSergey Zigachev 	default:
1458b843c749SSergey Zigachev 		dev_err(adev->dev,
1459b843c749SSergey Zigachev 			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1460b843c749SSergey Zigachev 		err = -EINVAL;
1461b843c749SSergey Zigachev 		goto out;
1462b843c749SSergey Zigachev 	}
1463b843c749SSergey Zigachev out:
1464b843c749SSergey Zigachev 	return err;
1465b843c749SSergey Zigachev }
1466b843c749SSergey Zigachev 
1467b843c749SSergey Zigachev /**
1468b843c749SSergey Zigachev  * amdgpu_device_ip_early_init - run early init for hardware IPs
1469b843c749SSergey Zigachev  *
1470b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1471b843c749SSergey Zigachev  *
1472b843c749SSergey Zigachev  * Early initialization pass for hardware IPs.  The hardware IPs that make
1473b843c749SSergey Zigachev  * up each asic are discovered and each IP's early_init callback is run.  This
1474b843c749SSergey Zigachev  * is the first stage in initializing the asic.
1475b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
1476b843c749SSergey Zigachev  */
1477b843c749SSergey Zigachev static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
1478b843c749SSergey Zigachev {
1479b843c749SSergey Zigachev 	int i, r;
1480b843c749SSergey Zigachev 
1481b843c749SSergey Zigachev 	amdgpu_device_enable_virtual_display(adev);
1482b843c749SSergey Zigachev 
1483b843c749SSergey Zigachev 	switch (adev->asic_type) {
1484b843c749SSergey Zigachev 	case CHIP_TOPAZ:
1485b843c749SSergey Zigachev 	case CHIP_TONGA:
1486b843c749SSergey Zigachev 	case CHIP_FIJI:
1487b843c749SSergey Zigachev 	case CHIP_POLARIS10:
1488b843c749SSergey Zigachev 	case CHIP_POLARIS11:
1489b843c749SSergey Zigachev 	case CHIP_POLARIS12:
1490b843c749SSergey Zigachev 	case CHIP_VEGAM:
1491b843c749SSergey Zigachev 	case CHIP_CARRIZO:
1492b843c749SSergey Zigachev 	case CHIP_STONEY:
1493b843c749SSergey Zigachev 		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
1494b843c749SSergey Zigachev 			adev->family = AMDGPU_FAMILY_CZ;
1495b843c749SSergey Zigachev 		else
1496b843c749SSergey Zigachev 			adev->family = AMDGPU_FAMILY_VI;
1497b843c749SSergey Zigachev 
1498b843c749SSergey Zigachev 		r = vi_set_ip_blocks(adev);
1499b843c749SSergey Zigachev 		if (r)
1500b843c749SSergey Zigachev 			return r;
1501b843c749SSergey Zigachev 		break;
1502b843c749SSergey Zigachev #ifdef CONFIG_DRM_AMDGPU_SI
1503b843c749SSergey Zigachev 	case CHIP_VERDE:
1504b843c749SSergey Zigachev 	case CHIP_TAHITI:
1505b843c749SSergey Zigachev 	case CHIP_PITCAIRN:
1506b843c749SSergey Zigachev 	case CHIP_OLAND:
1507b843c749SSergey Zigachev 	case CHIP_HAINAN:
1508b843c749SSergey Zigachev 		adev->family = AMDGPU_FAMILY_SI;
1509b843c749SSergey Zigachev 		r = si_set_ip_blocks(adev);
1510b843c749SSergey Zigachev 		if (r)
1511b843c749SSergey Zigachev 			return r;
1512b843c749SSergey Zigachev 		break;
1513b843c749SSergey Zigachev #endif
1514b843c749SSergey Zigachev #ifdef CONFIG_DRM_AMDGPU_CIK
1515b843c749SSergey Zigachev 	case CHIP_BONAIRE:
1516b843c749SSergey Zigachev 	case CHIP_HAWAII:
1517b843c749SSergey Zigachev 	case CHIP_KAVERI:
1518b843c749SSergey Zigachev 	case CHIP_KABINI:
1519b843c749SSergey Zigachev 	case CHIP_MULLINS:
1520b843c749SSergey Zigachev 		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1521b843c749SSergey Zigachev 			adev->family = AMDGPU_FAMILY_CI;
1522b843c749SSergey Zigachev 		else
1523b843c749SSergey Zigachev 			adev->family = AMDGPU_FAMILY_KV;
1524b843c749SSergey Zigachev 
1525b843c749SSergey Zigachev 		r = cik_set_ip_blocks(adev);
1526b843c749SSergey Zigachev 		if (r)
1527b843c749SSergey Zigachev 			return r;
1528b843c749SSergey Zigachev 		break;
1529b843c749SSergey Zigachev #endif
1530b843c749SSergey Zigachev 	case CHIP_VEGA10:
1531b843c749SSergey Zigachev 	case CHIP_VEGA12:
1532b843c749SSergey Zigachev 	case CHIP_VEGA20:
1533b843c749SSergey Zigachev 	case CHIP_RAVEN:
1534b843c749SSergey Zigachev 		if (adev->asic_type == CHIP_RAVEN)
1535b843c749SSergey Zigachev 			adev->family = AMDGPU_FAMILY_RV;
1536b843c749SSergey Zigachev 		else
1537b843c749SSergey Zigachev 			adev->family = AMDGPU_FAMILY_AI;
1538b843c749SSergey Zigachev 
1539b843c749SSergey Zigachev 		r = soc15_set_ip_blocks(adev);
1540b843c749SSergey Zigachev 		if (r)
1541b843c749SSergey Zigachev 			return r;
1542b843c749SSergey Zigachev 		break;
1543b843c749SSergey Zigachev 	default:
1544b843c749SSergey Zigachev 		/* FIXME: not supported yet */
1545b843c749SSergey Zigachev 		return -EINVAL;
1546b843c749SSergey Zigachev 	}
1547b843c749SSergey Zigachev 
1548b843c749SSergey Zigachev 	r = amdgpu_device_parse_gpu_info_fw(adev);
1549b843c749SSergey Zigachev 	if (r)
1550b843c749SSergey Zigachev 		return r;
1551b843c749SSergey Zigachev 
1552b843c749SSergey Zigachev 	amdgpu_amdkfd_device_probe(adev);
1553b843c749SSergey Zigachev 
1554b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev)) {
1555b843c749SSergey Zigachev 		r = amdgpu_virt_request_full_gpu(adev, true);
1556b843c749SSergey Zigachev 		if (r)
1557b843c749SSergey Zigachev 			return -EAGAIN;
1558b843c749SSergey Zigachev 	}
1559b843c749SSergey Zigachev 
1560b843c749SSergey Zigachev 	adev->powerplay.pp_feature = amdgpu_pp_feature_mask;
1561b843c749SSergey Zigachev 
1562b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
1563b843c749SSergey Zigachev 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1564b843c749SSergey Zigachev 			DRM_ERROR("disabled ip block: %d <%s>\n",
1565b843c749SSergey Zigachev 				  i, adev->ip_blocks[i].version->funcs->name);
1566b843c749SSergey Zigachev 			adev->ip_blocks[i].status.valid = false;
1567b843c749SSergey Zigachev 		} else {
1568b843c749SSergey Zigachev 			if (adev->ip_blocks[i].version->funcs->early_init) {
1569b843c749SSergey Zigachev 				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
1570b843c749SSergey Zigachev 				if (r == -ENOENT) {
1571b843c749SSergey Zigachev 					adev->ip_blocks[i].status.valid = false;
1572b843c749SSergey Zigachev 				} else if (r) {
1573b843c749SSergey Zigachev 					DRM_ERROR("early_init of IP block <%s> failed %d\n",
1574b843c749SSergey Zigachev 						  adev->ip_blocks[i].version->funcs->name, r);
1575b843c749SSergey Zigachev 					return r;
1576b843c749SSergey Zigachev 				} else {
1577b843c749SSergey Zigachev 					adev->ip_blocks[i].status.valid = true;
1578b843c749SSergey Zigachev 				}
1579b843c749SSergey Zigachev 			} else {
1580b843c749SSergey Zigachev 				adev->ip_blocks[i].status.valid = true;
1581b843c749SSergey Zigachev 			}
1582b843c749SSergey Zigachev 		}
1583b843c749SSergey Zigachev 	}
1584b843c749SSergey Zigachev 
1585b843c749SSergey Zigachev 	adev->cg_flags &= amdgpu_cg_mask;
1586b843c749SSergey Zigachev 	adev->pg_flags &= amdgpu_pg_mask;
1587b843c749SSergey Zigachev 
1588b843c749SSergey Zigachev 	return 0;
1589b843c749SSergey Zigachev }
1590b843c749SSergey Zigachev 
1591b843c749SSergey Zigachev /**
1592b843c749SSergey Zigachev  * amdgpu_device_ip_init - run init for hardware IPs
1593b843c749SSergey Zigachev  *
1594b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1595b843c749SSergey Zigachev  *
1596b843c749SSergey Zigachev  * Main initialization pass for hardware IPs.  The list of all the hardware
1597b843c749SSergey Zigachev  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
1598b843c749SSergey Zigachev  * are run.  sw_init initializes the software state associated with each IP
1599b843c749SSergey Zigachev  * and hw_init initializes the hardware associated with each IP.
1600b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
1601b843c749SSergey Zigachev  */
1602b843c749SSergey Zigachev static int amdgpu_device_ip_init(struct amdgpu_device *adev)
1603b843c749SSergey Zigachev {
1604b843c749SSergey Zigachev 	int i, r;
1605b843c749SSergey Zigachev 
1606b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
1607b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
1608b843c749SSergey Zigachev 			continue;
1609b843c749SSergey Zigachev 		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
1610b843c749SSergey Zigachev 		if (r) {
1611b843c749SSergey Zigachev 			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1612b843c749SSergey Zigachev 				  adev->ip_blocks[i].version->funcs->name, r);
1613b843c749SSergey Zigachev 			return r;
1614b843c749SSergey Zigachev 		}
1615b843c749SSergey Zigachev 		adev->ip_blocks[i].status.sw = true;
1616b843c749SSergey Zigachev 
1617b843c749SSergey Zigachev 		/* need to do gmc hw init early so we can allocate gpu mem */
1618b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1619b843c749SSergey Zigachev 			r = amdgpu_device_vram_scratch_init(adev);
1620b843c749SSergey Zigachev 			if (r) {
1621b843c749SSergey Zigachev 				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
1622b843c749SSergey Zigachev 				return r;
1623b843c749SSergey Zigachev 			}
1624b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1625b843c749SSergey Zigachev 			if (r) {
1626b843c749SSergey Zigachev 				DRM_ERROR("hw_init %d failed %d\n", i, r);
1627b843c749SSergey Zigachev 				return r;
1628b843c749SSergey Zigachev 			}
1629b843c749SSergey Zigachev 			r = amdgpu_device_wb_init(adev);
1630b843c749SSergey Zigachev 			if (r) {
1631b843c749SSergey Zigachev 				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
1632b843c749SSergey Zigachev 				return r;
1633b843c749SSergey Zigachev 			}
1634b843c749SSergey Zigachev 			adev->ip_blocks[i].status.hw = true;
1635b843c749SSergey Zigachev 
1636b843c749SSergey Zigachev 			/* right after GMC hw init, we create CSA */
1637b843c749SSergey Zigachev 			if (amdgpu_sriov_vf(adev)) {
1638b843c749SSergey Zigachev 				r = amdgpu_allocate_static_csa(adev);
1639b843c749SSergey Zigachev 				if (r) {
1640b843c749SSergey Zigachev 					DRM_ERROR("allocate CSA failed %d\n", r);
1641b843c749SSergey Zigachev 					return r;
1642b843c749SSergey Zigachev 				}
1643b843c749SSergey Zigachev 			}
1644b843c749SSergey Zigachev 		}
1645b843c749SSergey Zigachev 	}
1646b843c749SSergey Zigachev 
1647b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
1648b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.sw)
1649b843c749SSergey Zigachev 			continue;
1650b843c749SSergey Zigachev 		if (adev->ip_blocks[i].status.hw)
1651b843c749SSergey Zigachev 			continue;
1652b843c749SSergey Zigachev 		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1653b843c749SSergey Zigachev 		if (r) {
1654b843c749SSergey Zigachev 			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1655b843c749SSergey Zigachev 				  adev->ip_blocks[i].version->funcs->name, r);
1656b843c749SSergey Zigachev 			return r;
1657b843c749SSergey Zigachev 		}
1658b843c749SSergey Zigachev 		adev->ip_blocks[i].status.hw = true;
1659b843c749SSergey Zigachev 	}
1660b843c749SSergey Zigachev 
1661b843c749SSergey Zigachev 	amdgpu_amdkfd_device_init(adev);
1662b843c749SSergey Zigachev 
1663b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev)) {
1664b843c749SSergey Zigachev 		amdgpu_virt_init_data_exchange(adev);
1665b843c749SSergey Zigachev 		amdgpu_virt_release_full_gpu(adev, true);
1666b843c749SSergey Zigachev 	}
1667b843c749SSergey Zigachev 
1668b843c749SSergey Zigachev 	return 0;
1669b843c749SSergey Zigachev }
1670b843c749SSergey Zigachev 
1671b843c749SSergey Zigachev /**
1672b843c749SSergey Zigachev  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
1673b843c749SSergey Zigachev  *
1674b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1675b843c749SSergey Zigachev  *
1676b843c749SSergey Zigachev  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
1677b843c749SSergey Zigachev  * this function before a GPU reset.  If the value is retained after a
1678b843c749SSergey Zigachev  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
1679b843c749SSergey Zigachev  */
1680b843c749SSergey Zigachev static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
1681b843c749SSergey Zigachev {
1682b843c749SSergey Zigachev 	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1683b843c749SSergey Zigachev }
1684b843c749SSergey Zigachev 
1685b843c749SSergey Zigachev /**
1686b843c749SSergey Zigachev  * amdgpu_device_check_vram_lost - check if vram is valid
1687b843c749SSergey Zigachev  *
1688b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1689b843c749SSergey Zigachev  *
1690b843c749SSergey Zigachev  * Checks the reset magic value written to the gart pointer in VRAM.
1691b843c749SSergey Zigachev  * The driver calls this after a GPU reset to see if the contents of
1692b843c749SSergey Zigachev  * VRAM is lost or not.
1693b843c749SSergey Zigachev  * returns true if vram is lost, false if not.
1694b843c749SSergey Zigachev  */
1695b843c749SSergey Zigachev static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
1696b843c749SSergey Zigachev {
1697b843c749SSergey Zigachev 	return !!memcmp(adev->gart.ptr, adev->reset_magic,
1698b843c749SSergey Zigachev 			AMDGPU_RESET_MAGIC_NUM);
1699b843c749SSergey Zigachev }
1700b843c749SSergey Zigachev 
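/*
 * Illustrative (hypothetical) sketch of how the two helpers above pair up
 * around an asic reset: snapshot the magic beforehand, then compare after
 * the reset to decide whether VRAM contents survived.
 */
static inline bool example_vram_survived_reset(struct amdgpu_device *adev)
{
	amdgpu_device_fill_reset_magic(adev);	/* before the reset */

	/* ... the asic reset itself would happen here ... */

	return !amdgpu_device_check_vram_lost(adev);	/* true if VRAM kept */
}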
1701b843c749SSergey Zigachev /**
1702b843c749SSergey Zigachev  * amdgpu_device_ip_late_set_cg_state - late init for clockgating
1703b843c749SSergey Zigachev  *
1704b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1705b843c749SSergey Zigachev  *
1706b843c749SSergey Zigachev  * Late initialization pass enabling clockgating for hardware IPs.
1707b843c749SSergey Zigachev  * The list of all the hardware IPs that make up the asic is walked and the
1708b843c749SSergey Zigachev  * set_clockgating_state callbacks are run.  This stage is run late
1709b843c749SSergey Zigachev  * in the init process.
1710b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
1711b843c749SSergey Zigachev  */
1712b843c749SSergey Zigachev static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
1713b843c749SSergey Zigachev {
1714b843c749SSergey Zigachev 	int i = 0, r;
1715b843c749SSergey Zigachev 
1716b843c749SSergey Zigachev 	if (amdgpu_emu_mode == 1)
1717b843c749SSergey Zigachev 		return 0;
1718b843c749SSergey Zigachev 
1719b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
1720b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
1721b843c749SSergey Zigachev 			continue;
1722b843c749SSergey Zigachev 		/* skip CG for VCE/UVD, it's handled specially */
1723b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1724b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1725b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
1726b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1727b843c749SSergey Zigachev 			/* enable clockgating to save power */
1728b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1729b843c749SSergey Zigachev 										     AMD_CG_STATE_GATE);
1730b843c749SSergey Zigachev 			if (r) {
1731b843c749SSergey Zigachev 				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
1732b843c749SSergey Zigachev 					  adev->ip_blocks[i].version->funcs->name, r);
1733b843c749SSergey Zigachev 				return r;
1734b843c749SSergey Zigachev 			}
1735b843c749SSergey Zigachev 		}
1736b843c749SSergey Zigachev 	}
1737b843c749SSergey Zigachev 
1738b843c749SSergey Zigachev 	return 0;
1739b843c749SSergey Zigachev }
1740b843c749SSergey Zigachev 
1741b843c749SSergey Zigachev static int amdgpu_device_ip_late_set_pg_state(struct amdgpu_device *adev)
1742b843c749SSergey Zigachev {
1743b843c749SSergey Zigachev 	int i = 0, r;
1744b843c749SSergey Zigachev 
1745b843c749SSergey Zigachev 	if (amdgpu_emu_mode == 1)
1746b843c749SSergey Zigachev 		return 0;
1747b843c749SSergey Zigachev 
1748b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
1749b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
1750b843c749SSergey Zigachev 			continue;
1751b843c749SSergey Zigachev 		/* skip PG for VCE/UVD, it's handled specially */
1752b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1753b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1754b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
1755b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
1756b843c749SSergey Zigachev 			/* enable powergating to save power */
1757b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
1758b843c749SSergey Zigachev 										     AMD_PG_STATE_GATE);
1759b843c749SSergey Zigachev 			if (r) {
1760b843c749SSergey Zigachev 				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
1761b843c749SSergey Zigachev 					  adev->ip_blocks[i].version->funcs->name, r);
1762b843c749SSergey Zigachev 				return r;
1763b843c749SSergey Zigachev 			}
1764b843c749SSergey Zigachev 		}
1765b843c749SSergey Zigachev 	}
1766b843c749SSergey Zigachev 	return 0;
1767b843c749SSergey Zigachev }
1768b843c749SSergey Zigachev 
1769b843c749SSergey Zigachev /**
1770b843c749SSergey Zigachev  * amdgpu_device_ip_late_init - run late init for hardware IPs
1771b843c749SSergey Zigachev  *
1772b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1773b843c749SSergey Zigachev  *
1774b843c749SSergey Zigachev  * Late initialization pass for hardware IPs.  The list of all the hardware
1775b843c749SSergey Zigachev  * IPs that make up the asic is walked and the late_init callbacks are run.
1776b843c749SSergey Zigachev  * late_init covers any special initialization that an IP requires
1777b843c749SSergey Zigachev  * after all of them have been initialized or something that needs to happen
1778b843c749SSergey Zigachev  * late in the init process.
1779b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
1780b843c749SSergey Zigachev  */
1781b843c749SSergey Zigachev static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
1782b843c749SSergey Zigachev {
1783b843c749SSergey Zigachev 	int i = 0, r;
1784b843c749SSergey Zigachev 
1785b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
1786b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
1787b843c749SSergey Zigachev 			continue;
1788b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->funcs->late_init) {
1789b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
1790b843c749SSergey Zigachev 			if (r) {
1791b843c749SSergey Zigachev 				DRM_ERROR("late_init of IP block <%s> failed %d\n",
1792b843c749SSergey Zigachev 					  adev->ip_blocks[i].version->funcs->name, r);
1793b843c749SSergey Zigachev 				return r;
1794b843c749SSergey Zigachev 			}
1795b843c749SSergey Zigachev 			adev->ip_blocks[i].status.late_initialized = true;
1796b843c749SSergey Zigachev 		}
1797b843c749SSergey Zigachev 	}
1798b843c749SSergey Zigachev 
1799b843c749SSergey Zigachev 	amdgpu_device_ip_late_set_cg_state(adev);
1800b843c749SSergey Zigachev 	amdgpu_device_ip_late_set_pg_state(adev);
1801b843c749SSergey Zigachev 
1802b843c749SSergey Zigachev 	queue_delayed_work(system_wq, &adev->late_init_work,
1803b843c749SSergey Zigachev 			   msecs_to_jiffies(AMDGPU_RESUME_MS));
1804b843c749SSergey Zigachev 
1805b843c749SSergey Zigachev 	amdgpu_device_fill_reset_magic(adev);
1806b843c749SSergey Zigachev 
1807b843c749SSergey Zigachev 	return 0;
1808b843c749SSergey Zigachev }
1809b843c749SSergey Zigachev 
1810b843c749SSergey Zigachev /**
1811b843c749SSergey Zigachev  * amdgpu_device_ip_fini - run fini for hardware IPs
1812b843c749SSergey Zigachev  *
1813b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1814b843c749SSergey Zigachev  *
1815b843c749SSergey Zigachev  * Main teardown pass for hardware IPs.  The list of all the hardware
1816b843c749SSergey Zigachev  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
1817b843c749SSergey Zigachev  * are run.  hw_fini tears down the hardware associated with each IP
1818b843c749SSergey Zigachev  * and sw_fini tears down any software state associated with each IP.
1819b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
1820b843c749SSergey Zigachev  */
1821b843c749SSergey Zigachev static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
1822b843c749SSergey Zigachev {
1823b843c749SSergey Zigachev 	int i, r;
1824b843c749SSergey Zigachev 
182578973132SSergey Zigachev kprintf("amdgpu_device_ip_fini: 1\n");
1826b843c749SSergey Zigachev 	amdgpu_amdkfd_device_fini(adev);
1827b843c749SSergey Zigachev 	/* need to disable SMC first */
1828b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
1829b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.hw)
1830b843c749SSergey Zigachev 			continue;
1831b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC &&
1832b843c749SSergey Zigachev 			adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1833b843c749SSergey Zigachev 			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
1834b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1835b843c749SSergey Zigachev 										     AMD_CG_STATE_UNGATE);
1836b843c749SSergey Zigachev 			if (r) {
1837b843c749SSergey Zigachev 				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1838b843c749SSergey Zigachev 					  adev->ip_blocks[i].version->funcs->name, r);
1839b843c749SSergey Zigachev 				return r;
1840b843c749SSergey Zigachev 			}
1841b843c749SSergey Zigachev 			if (adev->powerplay.pp_funcs->set_powergating_by_smu)
1842b843c749SSergey Zigachev 				amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false);
1843b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
1844b843c749SSergey Zigachev 			/* XXX handle errors */
1845b843c749SSergey Zigachev 			if (r) {
1846b843c749SSergey Zigachev 				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1847b843c749SSergey Zigachev 					  adev->ip_blocks[i].version->funcs->name, r);
1848b843c749SSergey Zigachev 			}
1849b843c749SSergey Zigachev 			adev->ip_blocks[i].status.hw = false;
1850b843c749SSergey Zigachev 			break;
1851b843c749SSergey Zigachev 		}
1852b843c749SSergey Zigachev 	}
1853b843c749SSergey Zigachev 
185478973132SSergey Zigachev kprintf("amdgpu_device_ip_fini: 2\n");
1855b843c749SSergey Zigachev 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1856b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.hw)
1857b843c749SSergey Zigachev 			continue;
1858b843c749SSergey Zigachev 
1859b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1860b843c749SSergey Zigachev 			adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1861b843c749SSergey Zigachev 			adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
1862b843c749SSergey Zigachev 			adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1863b843c749SSergey Zigachev 			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
1864b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1865b843c749SSergey Zigachev 										     AMD_CG_STATE_UNGATE);
1866b843c749SSergey Zigachev 			if (r) {
1867b843c749SSergey Zigachev 				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1868b843c749SSergey Zigachev 					  adev->ip_blocks[i].version->funcs->name, r);
1869b843c749SSergey Zigachev 				return r;
1870b843c749SSergey Zigachev 			}
1871b843c749SSergey Zigachev 		}
1872b843c749SSergey Zigachev 
1873b843c749SSergey Zigachev 		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
1874b843c749SSergey Zigachev 		/* XXX handle errors */
1875b843c749SSergey Zigachev 		if (r) {
1876b843c749SSergey Zigachev 			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1877b843c749SSergey Zigachev 				  adev->ip_blocks[i].version->funcs->name, r);
1878b843c749SSergey Zigachev 		}
1879b843c749SSergey Zigachev 
1880b843c749SSergey Zigachev 		adev->ip_blocks[i].status.hw = false;
1881b843c749SSergey Zigachev 	}
1882b843c749SSergey Zigachev 
188378973132SSergey Zigachev kprintf("amdgpu_device_ip_fini: 3\n");
1884b843c749SSergey Zigachev 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1885b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.sw)
1886b843c749SSergey Zigachev 			continue;
1887b843c749SSergey Zigachev 
1888b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1889b843c749SSergey Zigachev 			amdgpu_free_static_csa(adev);
1890b843c749SSergey Zigachev 			amdgpu_device_wb_fini(adev);
1891b843c749SSergey Zigachev 			amdgpu_device_vram_scratch_fini(adev);
1892b843c749SSergey Zigachev 		}
1893b843c749SSergey Zigachev 
1894b843c749SSergey Zigachev 		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
1895b843c749SSergey Zigachev 		/* XXX handle errors */
1896b843c749SSergey Zigachev 		if (r) {
1897b843c749SSergey Zigachev 			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1898b843c749SSergey Zigachev 				  adev->ip_blocks[i].version->funcs->name, r);
1899b843c749SSergey Zigachev 		}
1900b843c749SSergey Zigachev 		adev->ip_blocks[i].status.sw = false;
1901b843c749SSergey Zigachev 		adev->ip_blocks[i].status.valid = false;
1902b843c749SSergey Zigachev 	}
1903b843c749SSergey Zigachev 
190478973132SSergey Zigachev kprintf("amdgpu_device_ip_fini: 4\n");
1905b843c749SSergey Zigachev 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1906b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.late_initialized)
1907b843c749SSergey Zigachev 			continue;
1908b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->funcs->late_fini)
1909b843c749SSergey Zigachev 			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1910b843c749SSergey Zigachev 		adev->ip_blocks[i].status.late_initialized = false;
1911b843c749SSergey Zigachev 	}
1912b843c749SSergey Zigachev 
191378973132SSergey Zigachev kprintf("amdgpu_device_ip_fini: 5\n");
1914b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev))
1915b843c749SSergey Zigachev 		if (amdgpu_virt_release_full_gpu(adev, false))
1916b843c749SSergey Zigachev 			DRM_ERROR("failed to release exclusive mode on fini\n");
1917b843c749SSergey Zigachev 
1918b843c749SSergey Zigachev 	return 0;
1919b843c749SSergey Zigachev }
1920b843c749SSergey Zigachev 
1921b843c749SSergey Zigachev /**
1922b843c749SSergey Zigachev  * amdgpu_device_ip_late_init_func_handler - work handler for clockgating
1923b843c749SSergey Zigachev  *
1924b843c749SSergey Zigachev  * @work: work_struct
1925b843c749SSergey Zigachev  *
1926b843c749SSergey Zigachev  * Work handler for amdgpu_device_ip_late_set_cg_state.  We put the
1927b843c749SSergey Zigachev  * clockgating setup into a worker thread to speed up driver init and
1928b843c749SSergey Zigachev  * resume from suspend.
1929b843c749SSergey Zigachev  */
1930b843c749SSergey Zigachev static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
1931b843c749SSergey Zigachev {
1932b843c749SSergey Zigachev 	struct amdgpu_device *adev =
1933b843c749SSergey Zigachev 		container_of(work, struct amdgpu_device, late_init_work.work);
1934b843c749SSergey Zigachev 	int r;
1935b843c749SSergey Zigachev 
1936b843c749SSergey Zigachev 	r = amdgpu_ib_ring_tests(adev);
1937b843c749SSergey Zigachev 	if (r)
1938b843c749SSergey Zigachev 		DRM_ERROR("ib ring test failed (%d).\n", r);
1939b843c749SSergey Zigachev }
1940b843c749SSergey Zigachev 
1941b843c749SSergey Zigachev /**
1942b843c749SSergey Zigachev  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
1943b843c749SSergey Zigachev  *
1944b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1945b843c749SSergey Zigachev  *
1946b843c749SSergey Zigachev  * Main suspend function for hardware IPs.  The list of all the hardware
1947b843c749SSergey Zigachev  * IPs that make up the asic is walked, clockgating is disabled and the
1948b843c749SSergey Zigachev  * suspend callbacks are run.  suspend puts the hardware and software state
1949b843c749SSergey Zigachev  * in each IP into a state suitable for suspend.
1950b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
1951b843c749SSergey Zigachev  */
1952b843c749SSergey Zigachev static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
1953b843c749SSergey Zigachev {
1954b843c749SSergey Zigachev 	int i, r;
1955b843c749SSergey Zigachev 
1956b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev))
1957b843c749SSergey Zigachev 		amdgpu_virt_request_full_gpu(adev, false);
1958b843c749SSergey Zigachev 
1959b843c749SSergey Zigachev 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1960b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
1961b843c749SSergey Zigachev 			continue;
1962b843c749SSergey Zigachev 		/* displays are handled separately */
1963b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
1964b843c749SSergey Zigachev 			/* ungate blocks so that suspend can properly shut them down */
1965b843c749SSergey Zigachev 			if (adev->ip_blocks[i].version->funcs->set_clockgating_state) {
1966b843c749SSergey Zigachev 				r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1967b843c749SSergey Zigachev 											     AMD_CG_STATE_UNGATE);
1968b843c749SSergey Zigachev 				if (r) {
1969b843c749SSergey Zigachev 					DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1970b843c749SSergey Zigachev 						  adev->ip_blocks[i].version->funcs->name, r);
1971b843c749SSergey Zigachev 				}
1972b843c749SSergey Zigachev 			}
1973b843c749SSergey Zigachev 			/* XXX handle errors */
1974b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->suspend(adev);
1975b843c749SSergey Zigachev 			/* XXX handle errors */
1976b843c749SSergey Zigachev 			if (r) {
1977b843c749SSergey Zigachev 				DRM_ERROR("suspend of IP block <%s> failed %d\n",
1978b843c749SSergey Zigachev 					  adev->ip_blocks[i].version->funcs->name, r);
1979b843c749SSergey Zigachev 			}
1980b843c749SSergey Zigachev 		}
1981b843c749SSergey Zigachev 	}
1982b843c749SSergey Zigachev 
1983b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev))
1984b843c749SSergey Zigachev 		amdgpu_virt_release_full_gpu(adev, false);
1985b843c749SSergey Zigachev 
1986b843c749SSergey Zigachev 	return 0;
1987b843c749SSergey Zigachev }
1988b843c749SSergey Zigachev 
1989b843c749SSergey Zigachev /**
1990b843c749SSergey Zigachev  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
1991b843c749SSergey Zigachev  *
1992b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1993b843c749SSergey Zigachev  *
1994b843c749SSergey Zigachev  * Phase 2 of the IP suspend sequence.  The list of all the hardware IPs is
1995b843c749SSergey Zigachev  * walked and, for every block except the displays (handled in phase 1),
1996b843c749SSergey Zigachev  * clockgating is disabled and the suspend callbacks are run.  suspend puts
1997b843c749SSergey Zigachev  * the hardware and software state in each IP into a state suitable for suspend.
1998b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
1999b843c749SSergey Zigachev  */
2000b843c749SSergey Zigachev static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2001b843c749SSergey Zigachev {
2002b843c749SSergey Zigachev 	int i, r;
2003b843c749SSergey Zigachev 
2004b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev))
2005b843c749SSergey Zigachev 		amdgpu_virt_request_full_gpu(adev, false);
2006b843c749SSergey Zigachev 
2007b843c749SSergey Zigachev 	/* ungate SMC block first */
2008b843c749SSergey Zigachev 	r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
2009b843c749SSergey Zigachev 						   AMD_CG_STATE_UNGATE);
2010b843c749SSergey Zigachev 	if (r) {
2011b843c749SSergey Zigachev 		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
2012b843c749SSergey Zigachev 	}
2013b843c749SSergey Zigachev 
2014b843c749SSergey Zigachev 	/* call smu to disable gfx off feature first when suspend */
2015b843c749SSergey Zigachev 	if (adev->powerplay.pp_funcs->set_powergating_by_smu)
2016b843c749SSergey Zigachev 		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false);
2017b843c749SSergey Zigachev 
2018b843c749SSergey Zigachev 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2019b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
2020b843c749SSergey Zigachev 			continue;
2021b843c749SSergey Zigachev 		/* displays are handled in phase1 */
2022b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2023b843c749SSergey Zigachev 			continue;
2024b843c749SSergey Zigachev 		/* ungate blocks so that suspend can properly shut them down */
2025b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
2026b843c749SSergey Zigachev 			adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2027b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2028b843c749SSergey Zigachev 										     AMD_CG_STATE_UNGATE);
2029b843c749SSergey Zigachev 			if (r) {
2030b843c749SSergey Zigachev 				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
2031b843c749SSergey Zigachev 					  adev->ip_blocks[i].version->funcs->name, r);
2032b843c749SSergey Zigachev 			}
2033b843c749SSergey Zigachev 		}
2034b843c749SSergey Zigachev 		/* XXX handle errors */
2035b843c749SSergey Zigachev 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
2036b843c749SSergey Zigachev 		/* XXX handle errors */
2037b843c749SSergey Zigachev 		if (r) {
2038b843c749SSergey Zigachev 			DRM_ERROR("suspend of IP block <%s> failed %d\n",
2039b843c749SSergey Zigachev 				  adev->ip_blocks[i].version->funcs->name, r);
2040b843c749SSergey Zigachev 		}
2041b843c749SSergey Zigachev 	}
2042b843c749SSergey Zigachev 
2043b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev))
2044b843c749SSergey Zigachev 		amdgpu_virt_release_full_gpu(adev, false);
2045b843c749SSergey Zigachev 
2046b843c749SSergey Zigachev 	return 0;
2047b843c749SSergey Zigachev }
2048b843c749SSergey Zigachev 
2049b843c749SSergey Zigachev /**
2050b843c749SSergey Zigachev  * amdgpu_device_ip_suspend - run suspend for hardware IPs
2051b843c749SSergey Zigachev  *
2052b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
2053b843c749SSergey Zigachev  *
2054b843c749SSergey Zigachev  * Main suspend function for hardware IPs.  The list of all the hardware
2055b843c749SSergey Zigachev  * IPs that make up the asic is walked, clockgating is disabled and the
2056b843c749SSergey Zigachev  * suspend callbacks are run.  suspend puts the hardware and software state
2057b843c749SSergey Zigachev  * in each IP into a state suitable for suspend.
2058b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
2059b843c749SSergey Zigachev  */
2060b843c749SSergey Zigachev int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2061b843c749SSergey Zigachev {
2062b843c749SSergey Zigachev 	int r;
2063b843c749SSergey Zigachev 
2064b843c749SSergey Zigachev 	r = amdgpu_device_ip_suspend_phase1(adev);
2065b843c749SSergey Zigachev 	if (r)
2066b843c749SSergey Zigachev 		return r;
2067b843c749SSergey Zigachev 	r = amdgpu_device_ip_suspend_phase2(adev);
2068b843c749SSergey Zigachev 
2069b843c749SSergey Zigachev 	return r;
2070b843c749SSergey Zigachev }
2071b843c749SSergey Zigachev 
2072b843c749SSergey Zigachev static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2073b843c749SSergey Zigachev {
2074b843c749SSergey Zigachev 	int i, r;
2075b843c749SSergey Zigachev 
2076b843c749SSergey Zigachev 	static enum amd_ip_block_type ip_order[] = {
2077b843c749SSergey Zigachev 		AMD_IP_BLOCK_TYPE_GMC,
2078b843c749SSergey Zigachev 		AMD_IP_BLOCK_TYPE_COMMON,
2079b843c749SSergey Zigachev 		AMD_IP_BLOCK_TYPE_PSP,
2080b843c749SSergey Zigachev 		AMD_IP_BLOCK_TYPE_IH,
2081b843c749SSergey Zigachev 	};
2082b843c749SSergey Zigachev 
2083b843c749SSergey Zigachev 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2084b843c749SSergey Zigachev 		int j;
2085b843c749SSergey Zigachev 		struct amdgpu_ip_block *block;
2086b843c749SSergey Zigachev 
2087b843c749SSergey Zigachev 		for (j = 0; j < adev->num_ip_blocks; j++) {
2088b843c749SSergey Zigachev 			block = &adev->ip_blocks[j];
2089b843c749SSergey Zigachev 
2090b843c749SSergey Zigachev 			if (block->version->type != ip_order[i] ||
2091b843c749SSergey Zigachev 				!block->status.valid)
2092b843c749SSergey Zigachev 				continue;
2093b843c749SSergey Zigachev 
2094b843c749SSergey Zigachev 			r = block->version->funcs->hw_init(adev);
2095b843c749SSergey Zigachev 			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
2096b843c749SSergey Zigachev 			if (r)
2097b843c749SSergey Zigachev 				return r;
2098b843c749SSergey Zigachev 		}
2099b843c749SSergey Zigachev 	}
2100b843c749SSergey Zigachev 
2101b843c749SSergey Zigachev 	return 0;
2102b843c749SSergey Zigachev }
2103b843c749SSergey Zigachev 
2104b843c749SSergey Zigachev static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2105b843c749SSergey Zigachev {
2106b843c749SSergey Zigachev 	int i, r;
2107b843c749SSergey Zigachev 
2108b843c749SSergey Zigachev 	static enum amd_ip_block_type ip_order[] = {
2109b843c749SSergey Zigachev 		AMD_IP_BLOCK_TYPE_SMC,
2110b843c749SSergey Zigachev 		AMD_IP_BLOCK_TYPE_DCE,
2111b843c749SSergey Zigachev 		AMD_IP_BLOCK_TYPE_GFX,
2112b843c749SSergey Zigachev 		AMD_IP_BLOCK_TYPE_SDMA,
2113b843c749SSergey Zigachev 		AMD_IP_BLOCK_TYPE_UVD,
2114b843c749SSergey Zigachev 		AMD_IP_BLOCK_TYPE_VCE
2115b843c749SSergey Zigachev 	};
2116b843c749SSergey Zigachev 
2117b843c749SSergey Zigachev 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2118b843c749SSergey Zigachev 		int j;
2119b843c749SSergey Zigachev 		struct amdgpu_ip_block *block;
2120b843c749SSergey Zigachev 
2121b843c749SSergey Zigachev 		for (j = 0; j < adev->num_ip_blocks; j++) {
2122b843c749SSergey Zigachev 			block = &adev->ip_blocks[j];
2123b843c749SSergey Zigachev 
2124b843c749SSergey Zigachev 			if (block->version->type != ip_order[i] ||
2125b843c749SSergey Zigachev 				!block->status.valid)
2126b843c749SSergey Zigachev 				continue;
2127b843c749SSergey Zigachev 
2128b843c749SSergey Zigachev 			r = block->version->funcs->hw_init(adev);
2129b843c749SSergey Zigachev 			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
2130b843c749SSergey Zigachev 			if (r)
2131b843c749SSergey Zigachev 				return r;
2132b843c749SSergey Zigachev 		}
2133b843c749SSergey Zigachev 	}
2134b843c749SSergey Zigachev 
2135b843c749SSergey Zigachev 	return 0;
2136b843c749SSergey Zigachev }
2137b843c749SSergey Zigachev 
2138b843c749SSergey Zigachev /**
2139b843c749SSergey Zigachev  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
2140b843c749SSergey Zigachev  *
2141b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
2142b843c749SSergey Zigachev  *
2143b843c749SSergey Zigachev  * First resume function for hardware IPs.  The list of all the hardware
2144b843c749SSergey Zigachev  * IPs that make up the asic is walked and the resume callbacks are run for
2145b843c749SSergey Zigachev  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
2146b843c749SSergey Zigachev  * after a suspend and updates the software state as necessary.  This
2147b843c749SSergey Zigachev  * function is also used for restoring the GPU after a GPU reset.
2148b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
2149b843c749SSergey Zigachev  */
2150b843c749SSergey Zigachev static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
2151b843c749SSergey Zigachev {
2152b843c749SSergey Zigachev 	int i, r;
2153b843c749SSergey Zigachev 
2154b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
2155b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
2156b843c749SSergey Zigachev 			continue;
2157b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2158b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2159b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2160b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->resume(adev);
2161b843c749SSergey Zigachev 			if (r) {
2162b843c749SSergey Zigachev 				DRM_ERROR("resume of IP block <%s> failed %d\n",
2163b843c749SSergey Zigachev 					  adev->ip_blocks[i].version->funcs->name, r);
2164b843c749SSergey Zigachev 				return r;
2165b843c749SSergey Zigachev 			}
2166b843c749SSergey Zigachev 		}
2167b843c749SSergey Zigachev 	}
2168b843c749SSergey Zigachev 
2169b843c749SSergey Zigachev 	return 0;
2170b843c749SSergey Zigachev }
2171b843c749SSergey Zigachev 
2172b843c749SSergey Zigachev /**
2173b843c749SSergey Zigachev  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
2174b843c749SSergey Zigachev  *
2175b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
2176b843c749SSergey Zigachev  *
2177b843c749SSergey Zigachev  * Second resume function for hardware IPs.  The list of all the hardware
2178b843c749SSergey Zigachev  * IPs that make up the asic is walked and the resume callbacks are run for
2179b843c749SSergey Zigachev  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
2180b843c749SSergey Zigachev  * functional state after a suspend and updates the software state as
2181b843c749SSergey Zigachev  * necessary.  This function is also used for restoring the GPU after a GPU
2182b843c749SSergey Zigachev  * reset.
2183b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
2184b843c749SSergey Zigachev  */
2185b843c749SSergey Zigachev static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
2186b843c749SSergey Zigachev {
2187b843c749SSergey Zigachev 	int i, r;
2188b843c749SSergey Zigachev 
2189b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
2190b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
2191b843c749SSergey Zigachev 			continue;
2192b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2193b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2194b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
2195b843c749SSergey Zigachev 			continue;
2196b843c749SSergey Zigachev 		r = adev->ip_blocks[i].version->funcs->resume(adev);
2197b843c749SSergey Zigachev 		if (r) {
2198b843c749SSergey Zigachev 			DRM_ERROR("resume of IP block <%s> failed %d\n",
2199b843c749SSergey Zigachev 				  adev->ip_blocks[i].version->funcs->name, r);
2200b843c749SSergey Zigachev 			return r;
2201b843c749SSergey Zigachev 		}
2202b843c749SSergey Zigachev 	}
2203b843c749SSergey Zigachev 
2204b843c749SSergey Zigachev 	return 0;
2205b843c749SSergey Zigachev }
2206b843c749SSergey Zigachev 
2207b843c749SSergey Zigachev /**
2208b843c749SSergey Zigachev  * amdgpu_device_ip_resume - run resume for hardware IPs
2209b843c749SSergey Zigachev  *
2210b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
2211b843c749SSergey Zigachev  *
2212b843c749SSergey Zigachev  * Main resume function for hardware IPs.  The hardware IPs
2213b843c749SSergey Zigachev  * are split into two resume functions because they are
2214b843c749SSergey Zigachev  * also used in recovering from a GPU reset and some additional
2215b843c749SSergey Zigachev  * steps need to be taken between them.  In this case (S3/S4) they are
2216b843c749SSergey Zigachev  * run sequentially.
2217b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
2218b843c749SSergey Zigachev  */
2219b843c749SSergey Zigachev static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
2220b843c749SSergey Zigachev {
2221b843c749SSergey Zigachev 	int r;
2222b843c749SSergey Zigachev 
2223b843c749SSergey Zigachev 	r = amdgpu_device_ip_resume_phase1(adev);
2224b843c749SSergey Zigachev 	if (r)
2225b843c749SSergey Zigachev 		return r;
2226b843c749SSergey Zigachev 	r = amdgpu_device_ip_resume_phase2(adev);
2227b843c749SSergey Zigachev 
2228b843c749SSergey Zigachev 	return r;
2229b843c749SSergey Zigachev }
2230b843c749SSergey Zigachev 
2231b843c749SSergey Zigachev /**
2232b843c749SSergey Zigachev  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
2233b843c749SSergey Zigachev  *
2234b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
2235b843c749SSergey Zigachev  *
2236b843c749SSergey Zigachev  * Query the VBIOS data tables to determine if the board supports SR-IOV.
2237b843c749SSergey Zigachev  */
2238b843c749SSergey Zigachev static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
2239b843c749SSergey Zigachev {
2240b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev)) {
2241b843c749SSergey Zigachev 		if (adev->is_atom_fw) {
2242b843c749SSergey Zigachev 			if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2243b843c749SSergey Zigachev 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2244b843c749SSergey Zigachev 		} else {
2245b843c749SSergey Zigachev 			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2246b843c749SSergey Zigachev 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2247b843c749SSergey Zigachev 		}
2248b843c749SSergey Zigachev 
2249b843c749SSergey Zigachev 		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2250b843c749SSergey Zigachev 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
2251b843c749SSergey Zigachev 	}
2252b843c749SSergey Zigachev }
2253b843c749SSergey Zigachev 
2254b843c749SSergey Zigachev /**
2255b843c749SSergey Zigachev  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
2256b843c749SSergey Zigachev  *
2257b843c749SSergey Zigachev  * @asic_type: AMD asic type
2258b843c749SSergey Zigachev  *
2259b843c749SSergey Zigachev  * Check if there is DC (new modesetting infrastructure) support for an asic.
2260b843c749SSergey Zigachev  * Returns true if DC has support, false if not.
2261b843c749SSergey Zigachev  */
2262b843c749SSergey Zigachev bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2263b843c749SSergey Zigachev {
2264b843c749SSergey Zigachev 	switch (asic_type) {
2265b843c749SSergey Zigachev #if defined(CONFIG_DRM_AMD_DC)
2266b843c749SSergey Zigachev 	case CHIP_BONAIRE:
2267b843c749SSergey Zigachev 	case CHIP_KAVERI:
2268b843c749SSergey Zigachev 	case CHIP_KABINI:
2269b843c749SSergey Zigachev 	case CHIP_MULLINS:
2270b843c749SSergey Zigachev 		/*
2271b843c749SSergey Zigachev 		 * We have systems in the wild with these ASICs that require
2272b843c749SSergey Zigachev 		 * LVDS and VGA support which is not supported with DC.
2273b843c749SSergey Zigachev 		 *
2274b843c749SSergey Zigachev 		 * Fallback to the non-DC driver here by default so as not to
2275b843c749SSergey Zigachev 		 * cause regressions.
2276b843c749SSergey Zigachev 		 */
2277b843c749SSergey Zigachev 		return amdgpu_dc > 0;
2278b843c749SSergey Zigachev 	case CHIP_HAWAII:
2279b843c749SSergey Zigachev 	case CHIP_CARRIZO:
2280b843c749SSergey Zigachev 	case CHIP_STONEY:
2281b843c749SSergey Zigachev 	case CHIP_POLARIS10:
2282b843c749SSergey Zigachev 	case CHIP_POLARIS11:
2283b843c749SSergey Zigachev 	case CHIP_POLARIS12:
2284b843c749SSergey Zigachev 	case CHIP_VEGAM:
2285b843c749SSergey Zigachev 	case CHIP_TONGA:
2286b843c749SSergey Zigachev 	case CHIP_FIJI:
2287b843c749SSergey Zigachev 	case CHIP_VEGA10:
2288b843c749SSergey Zigachev 	case CHIP_VEGA12:
2289b843c749SSergey Zigachev 	case CHIP_VEGA20:
2290b843c749SSergey Zigachev #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2291b843c749SSergey Zigachev 	case CHIP_RAVEN:
2292b843c749SSergey Zigachev #endif
2293b843c749SSergey Zigachev 		return amdgpu_dc != 0;
2294b843c749SSergey Zigachev #endif
2295b843c749SSergey Zigachev 	default:
2296b843c749SSergey Zigachev 		return false;
2297b843c749SSergey Zigachev 	}
2298b843c749SSergey Zigachev }
2299b843c749SSergey Zigachev 
2300b843c749SSergey Zigachev /**
2301b843c749SSergey Zigachev  * amdgpu_device_has_dc_support - check if dc is supported
2302b843c749SSergey Zigachev  *
2303b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
2304b843c749SSergey Zigachev  *
2305b843c749SSergey Zigachev  * Returns true for supported, false for not supported
2306b843c749SSergey Zigachev  */
2307b843c749SSergey Zigachev bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2308b843c749SSergey Zigachev {
2309b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev))
2310b843c749SSergey Zigachev 		return false;
2311b843c749SSergey Zigachev 
2312b843c749SSergey Zigachev 	return amdgpu_device_asic_has_dc_support(adev->asic_type);
2313b843c749SSergey Zigachev }
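/*
 * Illustrative sketch (not part of the driver): typical caller pattern for
 * the helper above.  Code that only applies to one display stack is gated on
 * it; the two branch helpers named here are hypothetical placeholders.
 */
#if 0
	if (amdgpu_device_has_dc_support(adev))
		dc_display_setup(adev);		/* hypothetical DC path */
	else
		legacy_display_setup(adev);	/* hypothetical non-DC path */
#endif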
2314b843c749SSergey Zigachev 
2315b843c749SSergey Zigachev /**
2316b843c749SSergey Zigachev  * amdgpu_device_init - initialize the driver
2317b843c749SSergey Zigachev  *
2318b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
2319b843c749SSergey Zigachev  * @ddev: drm dev pointer
2320b843c749SSergey Zigachev  * @pdev: pci dev pointer
2321b843c749SSergey Zigachev  * @flags: driver flags
2322b843c749SSergey Zigachev  *
2323b843c749SSergey Zigachev  * Initializes the driver info and hw (all asics).
2324b843c749SSergey Zigachev  * Returns 0 for success or an error on failure.
2325b843c749SSergey Zigachev  * Called at driver startup.
2326b843c749SSergey Zigachev  */
2327b843c749SSergey Zigachev int amdgpu_device_init(struct amdgpu_device *adev,
2328b843c749SSergey Zigachev 		       struct drm_device *ddev,
2329b843c749SSergey Zigachev 		       struct pci_dev *pdev,
2330b843c749SSergey Zigachev 		       uint32_t flags)
2331b843c749SSergey Zigachev {
2332b843c749SSergey Zigachev 	int r, i;
2333b843c749SSergey Zigachev 	bool runtime = false;
2334b843c749SSergey Zigachev 	u32 max_MBps;
2335b843c749SSergey Zigachev 
233678973132SSergey Zigachev kprintf("amdgpu_device_init: start\n");
2337b843c749SSergey Zigachev 	adev->shutdown = false;
2338b843c749SSergey Zigachev 	adev->dev = &pdev->dev;
2339b843c749SSergey Zigachev 	adev->ddev = ddev;
2340b843c749SSergey Zigachev 	adev->pdev = pdev;
2341b843c749SSergey Zigachev 	adev->flags = flags;
2342b843c749SSergey Zigachev 	adev->asic_type = flags & AMD_ASIC_MASK;
2343b843c749SSergey Zigachev 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
2344b843c749SSergey Zigachev 	if (amdgpu_emu_mode == 1)
2345b843c749SSergey Zigachev 		adev->usec_timeout *= 2;
2346b843c749SSergey Zigachev 	adev->gmc.gart_size = 512 * 1024 * 1024;
2347b843c749SSergey Zigachev 	adev->accel_working = false;
234878973132SSergey Zigachev #ifdef __DragonFly__
234978973132SSergey Zigachev 	adev->fictitious_range_registered = false;
235078973132SSergey Zigachev #endif
2351b843c749SSergey Zigachev 	adev->num_rings = 0;
2352b843c749SSergey Zigachev 	adev->mman.buffer_funcs = NULL;
2353b843c749SSergey Zigachev 	adev->mman.buffer_funcs_ring = NULL;
2354b843c749SSergey Zigachev 	adev->vm_manager.vm_pte_funcs = NULL;
2355b843c749SSergey Zigachev 	adev->vm_manager.vm_pte_num_rings = 0;
2356b843c749SSergey Zigachev 	adev->gmc.gmc_funcs = NULL;
2357b843c749SSergey Zigachev 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2358b843c749SSergey Zigachev 	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
2359b843c749SSergey Zigachev 
2360b843c749SSergey Zigachev 	adev->smc_rreg = &amdgpu_invalid_rreg;
2361b843c749SSergey Zigachev 	adev->smc_wreg = &amdgpu_invalid_wreg;
2362b843c749SSergey Zigachev 	adev->pcie_rreg = &amdgpu_invalid_rreg;
2363b843c749SSergey Zigachev 	adev->pcie_wreg = &amdgpu_invalid_wreg;
2364b843c749SSergey Zigachev 	adev->pciep_rreg = &amdgpu_invalid_rreg;
2365b843c749SSergey Zigachev 	adev->pciep_wreg = &amdgpu_invalid_wreg;
2366b843c749SSergey Zigachev 	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2367b843c749SSergey Zigachev 	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2368b843c749SSergey Zigachev 	adev->didt_rreg = &amdgpu_invalid_rreg;
2369b843c749SSergey Zigachev 	adev->didt_wreg = &amdgpu_invalid_wreg;
2370b843c749SSergey Zigachev 	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2371b843c749SSergey Zigachev 	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
2372b843c749SSergey Zigachev 	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2373b843c749SSergey Zigachev 	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2374b843c749SSergey Zigachev 
2375b843c749SSergey Zigachev 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2376b843c749SSergey Zigachev 		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2377b843c749SSergey Zigachev 		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
2378b843c749SSergey Zigachev 
2379b843c749SSergey Zigachev 	/* mutex initializations are all done here so we
2380b843c749SSergey Zigachev 	 * can recall functions without having locking issues */
2381b843c749SSergey Zigachev 	atomic_set(&adev->irq.ih.lock, 0);
238278973132SSergey Zigachev 	lockinit(&adev->firmware.mutex, "agfwm", 0, LK_CANRECURSE);
238378973132SSergey Zigachev 	lockinit(&adev->pm.mutex, "agpmm", 0, LK_CANRECURSE);
238478973132SSergey Zigachev 	lockinit(&adev->gfx.gpu_clock_mutex, "agggcm", 0, LK_CANRECURSE);
238578973132SSergey Zigachev 	lockinit(&adev->srbm_mutex, "agsm", 0, LK_CANRECURSE);
238678973132SSergey Zigachev 	lockinit(&adev->gfx.pipe_reserve_mutex, "aggprm", 0, LK_CANRECURSE);
238778973132SSergey Zigachev 	lockinit(&adev->grbm_idx_mutex, "aggim", 0, LK_CANRECURSE);
238878973132SSergey Zigachev 	lockinit(&adev->mn_lock, "agaml", 0, LK_CANRECURSE);
238978973132SSergey Zigachev 	lockinit(&adev->virt.vf_errors.lock, "agvfel", 0, LK_CANRECURSE);
2390b843c749SSergey Zigachev 	hash_init(adev->mn_hash);
239178973132SSergey Zigachev 	lockinit(&adev->lock_reset, "aglr", 0, LK_CANRECURSE);
2392b843c749SSergey Zigachev 
2393b843c749SSergey Zigachev 	amdgpu_device_check_arguments(adev);
2394b843c749SSergey Zigachev 
239578973132SSergey Zigachev 	lockinit(&adev->mmio_idx_lock, "agmil", 0, LK_CANRECURSE);
239678973132SSergey Zigachev 	lockinit(&adev->smc_idx_lock, "agsil", 0, LK_CANRECURSE);
239778973132SSergey Zigachev 	lockinit(&adev->pcie_idx_lock, "agpil", 0, LK_CANRECURSE);
239878973132SSergey Zigachev 	lockinit(&adev->uvd_ctx_idx_lock, "agucil", 0, LK_CANRECURSE);
239978973132SSergey Zigachev 	lockinit(&adev->didt_idx_lock, "agdil", 0, LK_CANRECURSE);
240078973132SSergey Zigachev 	lockinit(&adev->gc_cac_idx_lock, "aggcil", 0, LK_CANRECURSE);
240178973132SSergey Zigachev 	lockinit(&adev->se_cac_idx_lock, "agscil", 0, LK_CANRECURSE);
240278973132SSergey Zigachev 	lockinit(&adev->audio_endpt_idx_lock, "agaeil", 0, LK_CANRECURSE);
240378973132SSergey Zigachev 	spin_init(&adev->mm_stats.lock, "agammsl");
2404b843c749SSergey Zigachev 
2405b843c749SSergey Zigachev 	INIT_LIST_HEAD(&adev->shadow_list);
240678973132SSergey Zigachev 	lockinit(&adev->shadow_list_lock, "agasll", 0, LK_CANRECURSE);
2407b843c749SSergey Zigachev 
2408b843c749SSergey Zigachev 	INIT_LIST_HEAD(&adev->ring_lru_list);
240978973132SSergey Zigachev 	spin_init(&adev->ring_lru_list_lock, "agrlll");
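	/*
	 * DragonFly porting note (illustrative, not part of the driver): the
	 * upstream Linux code initializes these with mutex_init() and
	 * spin_lock_init(); the port maps them onto lockinit(lock, wmesg,
	 * timeout, flags) and spin_init(lock, descr).  A minimal sketch of
	 * the mapping, assuming the upstream field names are unchanged:
	 */
#if 0
	mutex_init(&adev->firmware.mutex);				/* Linux     */
	lockinit(&adev->firmware.mutex, "agfwm", 0, LK_CANRECURSE);	/* DragonFly */
	spin_lock_init(&adev->mm_stats.lock);				/* Linux     */
	spin_init(&adev->mm_stats.lock, "agammsl");			/* DragonFly */
#endif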
2410b843c749SSergey Zigachev 
2411b843c749SSergey Zigachev 	INIT_DELAYED_WORK(&adev->late_init_work,
2412b843c749SSergey Zigachev 			  amdgpu_device_ip_late_init_func_handler);
2413b843c749SSergey Zigachev 
2414b843c749SSergey Zigachev 	adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
2415b843c749SSergey Zigachev 
241678973132SSergey Zigachev kprintf("amdgpu_device_init: 1\n");
2417b843c749SSergey Zigachev 	/* Registers mapping */
2418b843c749SSergey Zigachev 	/* TODO: block userspace mapping of io register */
2419b843c749SSergey Zigachev 	if (adev->asic_type >= CHIP_BONAIRE) {
2420b843c749SSergey Zigachev 		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2421b843c749SSergey Zigachev 		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2422b843c749SSergey Zigachev 	} else {
2423b843c749SSergey Zigachev 		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2424b843c749SSergey Zigachev 		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2425b843c749SSergey Zigachev 	}
2426b843c749SSergey Zigachev 
2427b843c749SSergey Zigachev 	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2428b843c749SSergey Zigachev 	if (adev->rmmio == NULL) {
2429b843c749SSergey Zigachev 		return -ENOMEM;
2430b843c749SSergey Zigachev 	}
2431b843c749SSergey Zigachev 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2432b843c749SSergey Zigachev 	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2433b843c749SSergey Zigachev 
2434b843c749SSergey Zigachev 	/* doorbell bar mapping */
2435b843c749SSergey Zigachev 	amdgpu_device_doorbell_init(adev);
2436b843c749SSergey Zigachev 
243778973132SSergey Zigachev kprintf("amdgpu_device_init: 2\n");
2438b843c749SSergey Zigachev 	/* io port mapping */
2439b843c749SSergey Zigachev 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
244078973132SSergey Zigachev 		uint32_t data;
244178973132SSergey Zigachev 
244278973132SSergey Zigachev 		kprintf("amdgpu_device_init: for loop %d\n", i);
244378973132SSergey Zigachev 		data = pci_read_config(adev->dev->bsddev, PCIR_BAR(i), 4);
244478973132SSergey Zigachev 		if (PCI_BAR_IO(data)) {
244578973132SSergey Zigachev 			adev->rio_rid = PCIR_BAR(i);
244678973132SSergey Zigachev 			adev->rio_mem = bus_alloc_resource_any(adev->dev->bsddev,
244778973132SSergey Zigachev 			    SYS_RES_IOPORT, &adev->rio_rid,
244878973132SSergey Zigachev 			    RF_ACTIVE | RF_SHAREABLE);
244978973132SSergey Zigachev 			adev->rio_mem_size = rman_get_size(adev->rio_mem);
245078973132SSergey Zigachev kprintf("amdgpu_device_init: rio_rid=%d\n", adev->rio_rid);
245178973132SSergey Zigachev kprintf("amdgpu_device_init: mem_size=%ld\n", adev->rio_mem_size);
2452b843c749SSergey Zigachev 			break;
2453b843c749SSergey Zigachev 		}
2454b843c749SSergey Zigachev 	}
2455b843c749SSergey Zigachev 	if (adev->rio_mem == NULL)
2456b843c749SSergey Zigachev 		DRM_INFO("PCI I/O BAR is not found.\n");
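	/*
	 * Porting note (illustrative): upstream Linux scans the same BARs via
	 * the pci_resource_*() helpers and maps the I/O BAR with pci_iomap()
	 * instead of reading the BAR registers directly.  The sketch below is
	 * an approximation of that upstream loop, not code from this driver.
	 */
#if 0
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
			adev->rio_mem = pci_iomap(adev->pdev, i,
						  adev->rio_mem_size);
			if (adev->rio_mem)
				break;
		}
	}
#endif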
2457b843c749SSergey Zigachev 
245878973132SSergey Zigachev kprintf("amdgpu_device_init: 3\n");
2459b843c749SSergey Zigachev 	amdgpu_device_get_pcie_info(adev);
2460b843c749SSergey Zigachev 
2461b843c749SSergey Zigachev 	/* early init functions */
2462b843c749SSergey Zigachev 	r = amdgpu_device_ip_early_init(adev);
2463b843c749SSergey Zigachev 	if (r)
2464b843c749SSergey Zigachev 		return r;
2465b843c749SSergey Zigachev 
2466b843c749SSergey Zigachev 	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
2467b843c749SSergey Zigachev 	/* this will fail for cards that aren't VGA class devices, just
2468b843c749SSergey Zigachev 	 * ignore it */
246978973132SSergey Zigachev #if 0
2470b843c749SSergey Zigachev 	vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
247178973132SSergey Zigachev #endif
2472b843c749SSergey Zigachev 
2473b843c749SSergey Zigachev 	if (amdgpu_device_is_px(ddev))
2474b843c749SSergey Zigachev 		runtime = true;
247578973132SSergey Zigachev #if 0
2476b843c749SSergey Zigachev 	if (!pci_is_thunderbolt_attached(adev->pdev))
2477b843c749SSergey Zigachev 		vga_switcheroo_register_client(adev->pdev,
2478b843c749SSergey Zigachev 					       &amdgpu_switcheroo_ops, runtime);
2479b843c749SSergey Zigachev 	if (runtime)
2480b843c749SSergey Zigachev 		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
248178973132SSergey Zigachev #endif
2482b843c749SSergey Zigachev 
2483b843c749SSergey Zigachev 	if (amdgpu_emu_mode == 1) {
2484b843c749SSergey Zigachev 		/* post the asic on emulation mode */
2485b843c749SSergey Zigachev 		emu_soc_asic_init(adev);
2486b843c749SSergey Zigachev 		goto fence_driver_init;
2487b843c749SSergey Zigachev 	}
248878973132SSergey Zigachev kprintf("amdgpu_device_init: 4\n");
2489b843c749SSergey Zigachev 	/* Read BIOS */
2490b843c749SSergey Zigachev 	if (!amdgpu_get_bios(adev)) {
2491b843c749SSergey Zigachev 		r = -EINVAL;
2492b843c749SSergey Zigachev 		goto failed;
2493b843c749SSergey Zigachev 	}
2494b843c749SSergey Zigachev 
249578973132SSergey Zigachev kprintf("amdgpu_device_init: 5\n");
2496b843c749SSergey Zigachev 	r = amdgpu_atombios_init(adev);
2497b843c749SSergey Zigachev 	if (r) {
2498b843c749SSergey Zigachev 		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2499b843c749SSergey Zigachev 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2500b843c749SSergey Zigachev 		goto failed;
2501b843c749SSergey Zigachev 	}
2502b843c749SSergey Zigachev 
2503b843c749SSergey Zigachev 	/* detect if we have an SRIOV vbios */
2504b843c749SSergey Zigachev 	amdgpu_device_detect_sriov_bios(adev);
2505b843c749SSergey Zigachev 
2506b843c749SSergey Zigachev 	/* Post card if necessary */
2507b843c749SSergey Zigachev 	if (amdgpu_device_need_post(adev)) {
2508b843c749SSergey Zigachev 		if (!adev->bios) {
2509b843c749SSergey Zigachev 			dev_err(adev->dev, "no vBIOS found\n");
2510b843c749SSergey Zigachev 			r = -EINVAL;
2511b843c749SSergey Zigachev 			goto failed;
2512b843c749SSergey Zigachev 		}
2513b843c749SSergey Zigachev 		DRM_INFO("GPU posting now...\n");
2514b843c749SSergey Zigachev 		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2515b843c749SSergey Zigachev 		if (r) {
2516b843c749SSergey Zigachev 			dev_err(adev->dev, "gpu post error!\n");
2517b843c749SSergey Zigachev 			goto failed;
2518b843c749SSergey Zigachev 		}
2519b843c749SSergey Zigachev 	}
2520b843c749SSergey Zigachev 
252178973132SSergey Zigachev kprintf("amdgpu_device_init: 6\n");
2522b843c749SSergey Zigachev 	if (adev->is_atom_fw) {
252378973132SSergey Zigachev kprintf("amdgpu_device_init: 6.1\n");
2524b843c749SSergey Zigachev 		/* Initialize clocks */
2525b843c749SSergey Zigachev 		r = amdgpu_atomfirmware_get_clock_info(adev);
2526b843c749SSergey Zigachev 		if (r) {
2527b843c749SSergey Zigachev 			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
2528b843c749SSergey Zigachev 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
2529b843c749SSergey Zigachev 			goto failed;
2530b843c749SSergey Zigachev 		}
2531b843c749SSergey Zigachev 	} else {
253278973132SSergey Zigachev kprintf("amdgpu_device_init: 6.2\n");
2533b843c749SSergey Zigachev 		/* Initialize clocks */
2534b843c749SSergey Zigachev 		r = amdgpu_atombios_get_clock_info(adev);
2535b843c749SSergey Zigachev 		if (r) {
2536b843c749SSergey Zigachev 			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
2537b843c749SSergey Zigachev 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
2538b843c749SSergey Zigachev 			goto failed;
2539b843c749SSergey Zigachev 		}
254078973132SSergey Zigachev kprintf("amdgpu_device_init: 6.3\n");
2541b843c749SSergey Zigachev 		/* init i2c buses */
2542b843c749SSergey Zigachev 		if (!amdgpu_device_has_dc_support(adev))
2543b843c749SSergey Zigachev 			amdgpu_atombios_i2c_init(adev);
2544b843c749SSergey Zigachev 	}
2545b843c749SSergey Zigachev 
2546b843c749SSergey Zigachev fence_driver_init:
254778973132SSergey Zigachev kprintf("amdgpu_device_init: 7\n");
2548b843c749SSergey Zigachev 	/* Fence driver */
2549b843c749SSergey Zigachev 	r = amdgpu_fence_driver_init(adev);
2550b843c749SSergey Zigachev 	if (r) {
2551b843c749SSergey Zigachev 		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
2552b843c749SSergey Zigachev 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
2553b843c749SSergey Zigachev 		goto failed;
2554b843c749SSergey Zigachev 	}
2555b843c749SSergey Zigachev 
255678973132SSergey Zigachev kprintf("amdgpu_device_init: 8\n");
2557b843c749SSergey Zigachev 	/* init the mode config */
2558b843c749SSergey Zigachev 	drm_mode_config_init(adev->ddev);
2559b843c749SSergey Zigachev 
2560b843c749SSergey Zigachev 	r = amdgpu_device_ip_init(adev);
2561b843c749SSergey Zigachev 	if (r) {
2562b843c749SSergey Zigachev 		/* failed in exclusive mode due to timeout */
2563b843c749SSergey Zigachev 		if (amdgpu_sriov_vf(adev) &&
2564b843c749SSergey Zigachev 		    !amdgpu_sriov_runtime(adev) &&
2565b843c749SSergey Zigachev 		    amdgpu_virt_mmio_blocked(adev) &&
2566b843c749SSergey Zigachev 		    !amdgpu_virt_wait_reset(adev)) {
2567b843c749SSergey Zigachev 			dev_err(adev->dev, "VF exclusive mode timeout\n");
2568b843c749SSergey Zigachev 			/* Don't send request since VF is inactive. */
2569b843c749SSergey Zigachev 			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
2570b843c749SSergey Zigachev 			adev->virt.ops = NULL;
2571b843c749SSergey Zigachev 			r = -EAGAIN;
2572b843c749SSergey Zigachev 			goto failed;
2573b843c749SSergey Zigachev 		}
2574b843c749SSergey Zigachev 		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
2575b843c749SSergey Zigachev 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
2576b843c749SSergey Zigachev 		goto failed;
2577b843c749SSergey Zigachev 	}
257878973132SSergey Zigachev kprintf("amdgpu_device_init: 9\n");
2579b843c749SSergey Zigachev 	adev->accel_working = true;
2580b843c749SSergey Zigachev 
2581b843c749SSergey Zigachev 	amdgpu_vm_check_compute_bug(adev);
2582b843c749SSergey Zigachev 
2583b843c749SSergey Zigachev 	/* Initialize the buffer migration limit. */
2584b843c749SSergey Zigachev 	if (amdgpu_moverate >= 0)
2585b843c749SSergey Zigachev 		max_MBps = amdgpu_moverate;
2586b843c749SSergey Zigachev 	else
2587b843c749SSergey Zigachev 		max_MBps = 8; /* Allow 8 MB/s. */
2588b843c749SSergey Zigachev 	/* Get a log2 for easy divisions. */
2589b843c749SSergey Zigachev 	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
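	/*
	 * Worked example (illustrative): the default max_MBps of 8 gives
	 * ilog2(8) = 3, so scaling by the cap is a shift (n << 3 == n * 8)
	 * rather than a multiply or divide.  A hypothetical consumer turning
	 * a budget of 'us' microseconds into a byte allowance might do:
	 */
#if 0
	u64 bytes_allowed = ((u64)us << adev->mm_stats.log2_max_MBps)	/* us * MB/s        */
			    * (1024 * 1024) / 1000000;			/* MB*us/s -> bytes */
#endif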
2590b843c749SSergey Zigachev 
2591b843c749SSergey Zigachev 	r = amdgpu_ib_pool_init(adev);
2592b843c749SSergey Zigachev 	if (r) {
2593b843c749SSergey Zigachev 		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2594b843c749SSergey Zigachev 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2595b843c749SSergey Zigachev 		goto failed;
2596b843c749SSergey Zigachev 	}
2597b843c749SSergey Zigachev 
259878973132SSergey Zigachev #ifdef __DragonFly__
259978973132SSergey Zigachev 	DRM_INFO("%s: Taking over the fictitious range 0x%jx-0x%jx\n", __func__,
260078973132SSergey Zigachev 	    (uintmax_t)adev->gmc.aper_base,
260178973132SSergey Zigachev 	    (uintmax_t)adev->gmc.aper_base + adev->gmc.visible_vram_size);
260278973132SSergey Zigachev 	r = vm_phys_fictitious_reg_range(
260378973132SSergey Zigachev 	    adev->gmc.aper_base,
260478973132SSergey Zigachev 	    adev->gmc.aper_base + adev->gmc.visible_vram_size,
260578973132SSergey Zigachev 	    VM_MEMATTR_WRITE_COMBINING);
260678973132SSergey Zigachev 	if (r != 0) {
260778973132SSergey Zigachev 		DRM_ERROR("Failed to register fictitious range 0x%jx-0x%jx (%d).\n",
260878973132SSergey Zigachev 		    (uintmax_t)adev->gmc.aper_base,
260978973132SSergey Zigachev 		    (uintmax_t)adev->gmc.aper_base + adev->gmc.visible_vram_size, r);
261078973132SSergey Zigachev 		return (-r);
261178973132SSergey Zigachev 	}
261278973132SSergey Zigachev 	adev->fictitious_range_registered = true;
261378973132SSergey Zigachev #endif
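	/*
	 * DragonFly note (illustrative): the registration above is expected
	 * to be paired with an unregistration at teardown.  A minimal sketch,
	 * assuming the usual vm_phys_fictitious_unreg_range() counterpart:
	 */
#if 0
	vm_phys_fictitious_unreg_range(adev->gmc.aper_base,
	    adev->gmc.aper_base + adev->gmc.visible_vram_size);
#endif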
261478973132SSergey Zigachev 
261578973132SSergey Zigachev kprintf("amdgpu_device_init: 10\n");
2616b843c749SSergey Zigachev 	amdgpu_fbdev_init(adev);
2617b843c749SSergey Zigachev 
2618b843c749SSergey Zigachev 	r = amdgpu_pm_sysfs_init(adev);
2619b843c749SSergey Zigachev 	if (r)
2620b843c749SSergey Zigachev 		DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2621b843c749SSergey Zigachev 
2622b843c749SSergey Zigachev 	r = amdgpu_debugfs_gem_init(adev);
2623b843c749SSergey Zigachev 	if (r)
2624b843c749SSergey Zigachev 		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
2625b843c749SSergey Zigachev 
2626b843c749SSergey Zigachev 	r = amdgpu_debugfs_regs_init(adev);
2627b843c749SSergey Zigachev 	if (r)
2628b843c749SSergey Zigachev 		DRM_ERROR("registering register debugfs failed (%d).\n", r);
2629b843c749SSergey Zigachev 
2630b843c749SSergey Zigachev 	r = amdgpu_debugfs_firmware_init(adev);
2631b843c749SSergey Zigachev 	if (r)
2632b843c749SSergey Zigachev 		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
2633b843c749SSergey Zigachev 
2634b843c749SSergey Zigachev 	r = amdgpu_debugfs_init(adev);
2635b843c749SSergey Zigachev 	if (r)
2636b843c749SSergey Zigachev 		DRM_ERROR("Creating debugfs files failed (%d).\n", r);
2637b843c749SSergey Zigachev 
2638b843c749SSergey Zigachev 	if ((amdgpu_testing & 1)) {
2639b843c749SSergey Zigachev 		if (adev->accel_working)
2640b843c749SSergey Zigachev 			amdgpu_test_moves(adev);
2641b843c749SSergey Zigachev 		else
2642b843c749SSergey Zigachev 			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2643b843c749SSergey Zigachev 	}
2644b843c749SSergey Zigachev 	if (amdgpu_benchmarking) {
2645b843c749SSergey Zigachev 		if (adev->accel_working)
2646b843c749SSergey Zigachev 			amdgpu_benchmark(adev, amdgpu_benchmarking);
2647b843c749SSergey Zigachev 		else
2648b843c749SSergey Zigachev 			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2649b843c749SSergey Zigachev 	}
2650b843c749SSergey Zigachev 
2651b843c749SSergey Zigachev 	/* enable clockgating, etc. after ib tests, etc. since some blocks require
2652b843c749SSergey Zigachev 	 * explicit gating rather than handling it automatically.
2653b843c749SSergey Zigachev 	 */
2654b843c749SSergey Zigachev 	r = amdgpu_device_ip_late_init(adev);
2655b843c749SSergey Zigachev 	if (r) {
2656b843c749SSergey Zigachev 		dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
2657b843c749SSergey Zigachev 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
2658b843c749SSergey Zigachev 		goto failed;
2659b843c749SSergey Zigachev 	}
2660b843c749SSergey Zigachev 
2661b843c749SSergey Zigachev 	return 0;
2662b843c749SSergey Zigachev 
2663b843c749SSergey Zigachev failed:
2664b843c749SSergey Zigachev 	amdgpu_vf_error_trans_all(adev);
266578973132SSergey Zigachev #if 0
2666b843c749SSergey Zigachev 	if (runtime)
2667b843c749SSergey Zigachev 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
266878973132SSergey Zigachev #endif
2669b843c749SSergey Zigachev 
2670b843c749SSergey Zigachev 	return r;
2671b843c749SSergey Zigachev }
2672b843c749SSergey Zigachev 
2673b843c749SSergey Zigachev /**
2674b843c749SSergey Zigachev  * amdgpu_device_fini - tear down the driver
2675b843c749SSergey Zigachev  *
2676b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
2677b843c749SSergey Zigachev  *
2678b843c749SSergey Zigachev  * Tear down the driver info (all asics).
2679b843c749SSergey Zigachev  * Called at driver shutdown.
2680b843c749SSergey Zigachev  */
2681b843c749SSergey Zigachev void amdgpu_device_fini(struct amdgpu_device *adev)
2682b843c749SSergey Zigachev {
2683b843c749SSergey Zigachev 	int r;
2684b843c749SSergey Zigachev 
2685b843c749SSergey Zigachev 	DRM_INFO("amdgpu: finishing device.\n");
2686b843c749SSergey Zigachev 	adev->shutdown = true;
2687b843c749SSergey Zigachev 	/* disable all interrupts */
2688b843c749SSergey Zigachev 	amdgpu_irq_disable_all(adev);
2689b843c749SSergey Zigachev 	if (adev->mode_info.mode_config_initialized){
2690b843c749SSergey Zigachev 		if (!amdgpu_device_has_dc_support(adev))
2691b843c749SSergey Zigachev 			drm_crtc_force_disable_all(adev->ddev);
2692b843c749SSergey Zigachev 		else
2693b843c749SSergey Zigachev 			drm_atomic_helper_shutdown(adev->ddev);
2694b843c749SSergey Zigachev 	}
2695b843c749SSergey Zigachev 	amdgpu_ib_pool_fini(adev);
2696b843c749SSergey Zigachev 	amdgpu_fence_driver_fini(adev);
2697b843c749SSergey Zigachev 	amdgpu_pm_sysfs_fini(adev);
2698b843c749SSergey Zigachev 	amdgpu_fbdev_fini(adev);
2699b843c749SSergey Zigachev 	r = amdgpu_device_ip_fini(adev);
2700b843c749SSergey Zigachev 	if (adev->firmware.gpu_info_fw) {
2701b843c749SSergey Zigachev 		release_firmware(adev->firmware.gpu_info_fw);
2702b843c749SSergey Zigachev 		adev->firmware.gpu_info_fw = NULL;
2703b843c749SSergey Zigachev 	}
2704b843c749SSergey Zigachev 	adev->accel_working = false;
270578973132SSergey Zigachev #ifdef __DragonFly__
270678973132SSergey Zigachev 	adev->fictitious_range_registered = false;
270778973132SSergey Zigachev #endif
2708b843c749SSergey Zigachev 	cancel_delayed_work_sync(&adev->late_init_work);
2709b843c749SSergey Zigachev 	/* free i2c buses */
2710b843c749SSergey Zigachev 	if (!amdgpu_device_has_dc_support(adev))
2711b843c749SSergey Zigachev 		amdgpu_i2c_fini(adev);
2712b843c749SSergey Zigachev 
2713b843c749SSergey Zigachev 	if (amdgpu_emu_mode != 1)
2714b843c749SSergey Zigachev 		amdgpu_atombios_fini(adev);
2715b843c749SSergey Zigachev 
2716b843c749SSergey Zigachev 	kfree(adev->bios);
2717b843c749SSergey Zigachev 	adev->bios = NULL;
271878973132SSergey Zigachev #if 0
2719b843c749SSergey Zigachev 	if (!pci_is_thunderbolt_attached(adev->pdev))
2720b843c749SSergey Zigachev 		vga_switcheroo_unregister_client(adev->pdev);
2721b843c749SSergey Zigachev 	if (adev->flags & AMD_IS_PX)
2722b843c749SSergey Zigachev 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
2723b843c749SSergey Zigachev 	vga_client_register(adev->pdev, NULL, NULL, NULL);
272478973132SSergey Zigachev #endif
2725b843c749SSergey Zigachev 	if (adev->rio_mem)
272678973132SSergey Zigachev 		bus_release_resource(adev->dev->bsddev, SYS_RES_IOPORT, adev->rio_rid,
272778973132SSergey Zigachev 		    adev->rio_mem);
2728b843c749SSergey Zigachev 	adev->rio_mem = NULL;
2729b843c749SSergey Zigachev 	iounmap(adev->rmmio);
2730b843c749SSergey Zigachev 	adev->rmmio = NULL;
2731b843c749SSergey Zigachev 	amdgpu_device_doorbell_fini(adev);
2732b843c749SSergey Zigachev 	amdgpu_debugfs_regs_cleanup(adev);
2733b843c749SSergey Zigachev }
2734b843c749SSergey Zigachev 
2735b843c749SSergey Zigachev 
2736b843c749SSergey Zigachev /*
2737b843c749SSergey Zigachev  * Suspend & resume.
2738b843c749SSergey Zigachev  */
2739b843c749SSergey Zigachev /**
2740b843c749SSergey Zigachev  * amdgpu_device_suspend - initiate device suspend
2741b843c749SSergey Zigachev  *
2742b843c749SSergey Zigachev  * @dev: drm dev pointer
2743b843c749SSergey Zigachev  * @suspend: suspend state
2744b843c749SSergey Zigachev  * @fbcon: notify the fbdev of suspend
2745b843c749SSergey Zigachev  *
2746b843c749SSergey Zigachev  * Puts the hw in the suspend state (all asics).
2747b843c749SSergey Zigachev  * Returns 0 for success or an error on failure.
2748b843c749SSergey Zigachev  * Called at driver suspend.
2749b843c749SSergey Zigachev  */
2750b843c749SSergey Zigachev int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
2751b843c749SSergey Zigachev {
2752b843c749SSergey Zigachev 	struct amdgpu_device *adev;
2753b843c749SSergey Zigachev 	struct drm_crtc *crtc;
2754b843c749SSergey Zigachev 	struct drm_connector *connector;
2755b843c749SSergey Zigachev 	int r;
2756b843c749SSergey Zigachev 
2757b843c749SSergey Zigachev 	if (dev == NULL || dev->dev_private == NULL) {
2758b843c749SSergey Zigachev 		return -ENODEV;
2759b843c749SSergey Zigachev 	}
2760b843c749SSergey Zigachev 
2761b843c749SSergey Zigachev 	adev = dev->dev_private;
2762b843c749SSergey Zigachev 
2763b843c749SSergey Zigachev 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2764b843c749SSergey Zigachev 		return 0;
2765b843c749SSergey Zigachev 
2766b843c749SSergey Zigachev 	drm_kms_helper_poll_disable(dev);
2767b843c749SSergey Zigachev 
2768b843c749SSergey Zigachev 	if (fbcon)
2769b843c749SSergey Zigachev 		amdgpu_fbdev_set_suspend(adev, 1);
2770b843c749SSergey Zigachev 
2771b843c749SSergey Zigachev 	if (!amdgpu_device_has_dc_support(adev)) {
2772b843c749SSergey Zigachev 		/* turn off display hw */
2773b843c749SSergey Zigachev 		drm_modeset_lock_all(dev);
2774b843c749SSergey Zigachev 		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2775b843c749SSergey Zigachev 			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2776b843c749SSergey Zigachev 		}
2777b843c749SSergey Zigachev 		drm_modeset_unlock_all(dev);
2778b843c749SSergey Zigachev 		/* unpin the front buffers and cursors */
2779b843c749SSergey Zigachev 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2780b843c749SSergey Zigachev 			struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2781b843c749SSergey Zigachev 			struct drm_framebuffer *fb = crtc->primary->fb;
2782b843c749SSergey Zigachev 			struct amdgpu_bo *robj;
2783b843c749SSergey Zigachev 
2784b843c749SSergey Zigachev 			if (amdgpu_crtc->cursor_bo) {
2785b843c749SSergey Zigachev 				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2786b843c749SSergey Zigachev 				r = amdgpu_bo_reserve(aobj, true);
2787b843c749SSergey Zigachev 				if (r == 0) {
2788b843c749SSergey Zigachev 					amdgpu_bo_unpin(aobj);
2789b843c749SSergey Zigachev 					amdgpu_bo_unreserve(aobj);
2790b843c749SSergey Zigachev 				}
2791b843c749SSergey Zigachev 			}
2792b843c749SSergey Zigachev 
2793b843c749SSergey Zigachev 			if (fb == NULL || fb->obj[0] == NULL) {
2794b843c749SSergey Zigachev 				continue;
2795b843c749SSergey Zigachev 			}
2796b843c749SSergey Zigachev 			robj = gem_to_amdgpu_bo(fb->obj[0]);
2797b843c749SSergey Zigachev 			/* don't unpin kernel fb objects */
2798b843c749SSergey Zigachev 			if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
2799b843c749SSergey Zigachev 				r = amdgpu_bo_reserve(robj, true);
2800b843c749SSergey Zigachev 				if (r == 0) {
2801b843c749SSergey Zigachev 					amdgpu_bo_unpin(robj);
2802b843c749SSergey Zigachev 					amdgpu_bo_unreserve(robj);
2803b843c749SSergey Zigachev 				}
2804b843c749SSergey Zigachev 			}
2805b843c749SSergey Zigachev 		}
2806b843c749SSergey Zigachev 	}
2807b843c749SSergey Zigachev 
2808b843c749SSergey Zigachev 	amdgpu_amdkfd_suspend(adev);
2809b843c749SSergey Zigachev 
2810b843c749SSergey Zigachev 	r = amdgpu_device_ip_suspend_phase1(adev);
2811b843c749SSergey Zigachev 
2812b843c749SSergey Zigachev 	/* evict vram memory */
2813b843c749SSergey Zigachev 	amdgpu_bo_evict_vram(adev);
2814b843c749SSergey Zigachev 
2815b843c749SSergey Zigachev 	amdgpu_fence_driver_suspend(adev);
2816b843c749SSergey Zigachev 
2817b843c749SSergey Zigachev 	r = amdgpu_device_ip_suspend_phase2(adev);
2818b843c749SSergey Zigachev 
2819b843c749SSergey Zigachev 	/* evict remaining vram memory
2820b843c749SSergey Zigachev 	 * This second call to evict vram is to evict the gart page table
2821b843c749SSergey Zigachev 	 * using the CPU.
2822b843c749SSergey Zigachev 	 */
2823b843c749SSergey Zigachev 	amdgpu_bo_evict_vram(adev);
2824b843c749SSergey Zigachev 
282578973132SSergey Zigachev 	pci_save_state(device_get_parent(adev->dev->bsddev));
2826b843c749SSergey Zigachev 	if (suspend) {
2827b843c749SSergey Zigachev 		/* Shut down the device */
282878973132SSergey Zigachev #if 0
2829b843c749SSergey Zigachev 		pci_disable_device(dev->pdev);
2830b843c749SSergey Zigachev 		pci_set_power_state(dev->pdev, PCI_D3hot);
283178973132SSergey Zigachev #endif
2832b843c749SSergey Zigachev 	} else {
2833b843c749SSergey Zigachev 		r = amdgpu_asic_reset(adev);
2834b843c749SSergey Zigachev 		if (r)
2835b843c749SSergey Zigachev 			DRM_ERROR("amdgpu asic reset failed\n");
2836b843c749SSergey Zigachev 	}
2837b843c749SSergey Zigachev 
2838b843c749SSergey Zigachev 	return 0;
2839b843c749SSergey Zigachev }
2840b843c749SSergey Zigachev 
2841b843c749SSergey Zigachev /**
2842b843c749SSergey Zigachev  * amdgpu_device_resume - initiate device resume
2843b843c749SSergey Zigachev  *
2844b843c749SSergey Zigachev  * @dev: drm dev pointer
2845b843c749SSergey Zigachev  * @resume: resume state
2846b843c749SSergey Zigachev  * @fbcon: notify the fbdev of resume
2847b843c749SSergey Zigachev  *
2848b843c749SSergey Zigachev  * Bring the hw back to operating state (all asics).
2849b843c749SSergey Zigachev  * Returns 0 for success or an error on failure.
2850b843c749SSergey Zigachev  * Called at driver resume.
2851b843c749SSergey Zigachev  */
2852b843c749SSergey Zigachev int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
2853b843c749SSergey Zigachev {
2854b843c749SSergey Zigachev 	struct drm_connector *connector;
2855b843c749SSergey Zigachev 	struct amdgpu_device *adev = dev->dev_private;
2856b843c749SSergey Zigachev 	struct drm_crtc *crtc;
2857b843c749SSergey Zigachev 	int r = 0;
2858b843c749SSergey Zigachev 
2859b843c749SSergey Zigachev 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2860b843c749SSergey Zigachev 		return 0;
2861b843c749SSergey Zigachev 
2862b843c749SSergey Zigachev 	if (resume) {
286378973132SSergey Zigachev #if 0
2864b843c749SSergey Zigachev 		pci_set_power_state(dev->pdev, PCI_D0);
2865b843c749SSergey Zigachev 		pci_restore_state(dev->pdev);
2866b843c749SSergey Zigachev 		r = pci_enable_device(dev->pdev);
2867b843c749SSergey Zigachev 		if (r)
2868b843c749SSergey Zigachev 			return r;
286978973132SSergey Zigachev #endif
2870b843c749SSergey Zigachev 	}
2871b843c749SSergey Zigachev 
2872b843c749SSergey Zigachev 	/* post card */
2873b843c749SSergey Zigachev 	if (amdgpu_device_need_post(adev)) {
2874b843c749SSergey Zigachev 		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2875b843c749SSergey Zigachev 		if (r)
2876b843c749SSergey Zigachev 			DRM_ERROR("amdgpu asic init failed\n");
2877b843c749SSergey Zigachev 	}
2878b843c749SSergey Zigachev 
2879b843c749SSergey Zigachev 	r = amdgpu_device_ip_resume(adev);
2880b843c749SSergey Zigachev 	if (r) {
2881b843c749SSergey Zigachev 		DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
2882b843c749SSergey Zigachev 		return r;
2883b843c749SSergey Zigachev 	}
2884b843c749SSergey Zigachev 	amdgpu_fence_driver_resume(adev);
2885b843c749SSergey Zigachev 
2886b843c749SSergey Zigachev 
2887b843c749SSergey Zigachev 	r = amdgpu_device_ip_late_init(adev);
2888b843c749SSergey Zigachev 	if (r)
2889b843c749SSergey Zigachev 		return r;
2890b843c749SSergey Zigachev 
2891b843c749SSergey Zigachev 	if (!amdgpu_device_has_dc_support(adev)) {
2892b843c749SSergey Zigachev 		/* pin cursors */
2893b843c749SSergey Zigachev 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2894b843c749SSergey Zigachev 			struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2895b843c749SSergey Zigachev 
2896b843c749SSergey Zigachev 			if (amdgpu_crtc->cursor_bo) {
2897b843c749SSergey Zigachev 				struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2898b843c749SSergey Zigachev 				r = amdgpu_bo_reserve(aobj, true);
2899b843c749SSergey Zigachev 				if (r == 0) {
2900b843c749SSergey Zigachev 					r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2901b843c749SSergey Zigachev 					if (r != 0)
2902b843c749SSergey Zigachev 						DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2903b843c749SSergey Zigachev 					amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2904b843c749SSergey Zigachev 					amdgpu_bo_unreserve(aobj);
2905b843c749SSergey Zigachev 				}
2906b843c749SSergey Zigachev 			}
2907b843c749SSergey Zigachev 		}
2908b843c749SSergey Zigachev 	}
2909b843c749SSergey Zigachev 	r = amdgpu_amdkfd_resume(adev);
2910b843c749SSergey Zigachev 	if (r)
2911b843c749SSergey Zigachev 		return r;
2912b843c749SSergey Zigachev 
2913b843c749SSergey Zigachev 	/* Make sure IB tests flushed */
2914b843c749SSergey Zigachev 	flush_delayed_work(&adev->late_init_work);
2915b843c749SSergey Zigachev 
2916b843c749SSergey Zigachev 	/* blat the mode back in */
2917b843c749SSergey Zigachev 	if (fbcon) {
2918b843c749SSergey Zigachev 		if (!amdgpu_device_has_dc_support(adev)) {
2919b843c749SSergey Zigachev 			/* pre DCE11 */
2920b843c749SSergey Zigachev 			drm_helper_resume_force_mode(dev);
2921b843c749SSergey Zigachev 
2922b843c749SSergey Zigachev 			/* turn on display hw */
2923b843c749SSergey Zigachev 			drm_modeset_lock_all(dev);
2924b843c749SSergey Zigachev 			list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2925b843c749SSergey Zigachev 				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2926b843c749SSergey Zigachev 			}
2927b843c749SSergey Zigachev 			drm_modeset_unlock_all(dev);
2928b843c749SSergey Zigachev 		}
2929b843c749SSergey Zigachev 		amdgpu_fbdev_set_suspend(adev, 0);
2930b843c749SSergey Zigachev 	}
2931b843c749SSergey Zigachev 
2932b843c749SSergey Zigachev 	drm_kms_helper_poll_enable(dev);
2933b843c749SSergey Zigachev 
2934b843c749SSergey Zigachev 	/*
2935b843c749SSergey Zigachev 	 * Most of the connector probing functions try to acquire runtime pm
2936b843c749SSergey Zigachev 	 * refs to ensure that the GPU is powered on when connector polling is
2937b843c749SSergey Zigachev 	 * performed. Since we're calling this from a runtime PM callback,
2938b843c749SSergey Zigachev 	 * trying to acquire rpm refs will cause us to deadlock.
2939b843c749SSergey Zigachev 	 *
2940b843c749SSergey Zigachev 	 * Since we're guaranteed to be holding the rpm lock, it's safe to
2941b843c749SSergey Zigachev 	 * temporarily disable the rpm helpers so this doesn't deadlock us.
2942b843c749SSergey Zigachev 	 */
2943b843c749SSergey Zigachev #ifdef CONFIG_PM
2944b843c749SSergey Zigachev 	dev->dev->power.disable_depth++;
2945b843c749SSergey Zigachev #endif
2946b843c749SSergey Zigachev 	if (!amdgpu_device_has_dc_support(adev))
2947b843c749SSergey Zigachev 		drm_helper_hpd_irq_event(dev);
2948b843c749SSergey Zigachev 	else
2949b843c749SSergey Zigachev 		drm_kms_helper_hotplug_event(dev);
2950b843c749SSergey Zigachev #ifdef CONFIG_PM
2951b843c749SSergey Zigachev 	dev->dev->power.disable_depth--;
2952b843c749SSergey Zigachev #endif
2953b843c749SSergey Zigachev 	return 0;
2954b843c749SSergey Zigachev }
2955b843c749SSergey Zigachev 
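/*
 * Editor's note: a minimal sketch (assumptions flagged below) of how this
 * resume path is normally reached.  The driver's power-management callback
 * hands the struct drm_device to amdgpu_device_resume(); the callback name
 * and the meaning of the two boolean arguments are assumptions made for
 * illustration only, not the authoritative wiring in amdgpu_drv.c.
 */
#if 0	/* illustrative sketch, not compiled */
static int example_pmops_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	/* resume the hardware and bring the fbdev console back as well */
	return amdgpu_device_resume(drm_dev, true, true);
}
#endif
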
2956b843c749SSergey Zigachev /**
2957b843c749SSergey Zigachev  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
2958b843c749SSergey Zigachev  *
2959b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
2960b843c749SSergey Zigachev  *
2961b843c749SSergey Zigachev  * The list of all the hardware IPs that make up the asic is walked and
2962b843c749SSergey Zigachev  * the check_soft_reset callbacks are run.  check_soft_reset determines
2963b843c749SSergey Zigachev  * if the asic is still hung or not.
2964b843c749SSergey Zigachev  * Returns true if any of the IPs are still in a hung state, false if not.
2965b843c749SSergey Zigachev  */
2966b843c749SSergey Zigachev static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
2967b843c749SSergey Zigachev {
2968b843c749SSergey Zigachev 	int i;
2969b843c749SSergey Zigachev 	bool asic_hang = false;
2970b843c749SSergey Zigachev 
2971b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev))
2972b843c749SSergey Zigachev 		return true;
2973b843c749SSergey Zigachev 
2974b843c749SSergey Zigachev 	if (amdgpu_asic_need_full_reset(adev))
2975b843c749SSergey Zigachev 		return true;
2976b843c749SSergey Zigachev 
2977b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
2978b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
2979b843c749SSergey Zigachev 			continue;
2980b843c749SSergey Zigachev 		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2981b843c749SSergey Zigachev 			adev->ip_blocks[i].status.hang =
2982b843c749SSergey Zigachev 				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2983b843c749SSergey Zigachev 		if (adev->ip_blocks[i].status.hang) {
2984b843c749SSergey Zigachev 			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
2985b843c749SSergey Zigachev 			asic_hang = true;
2986b843c749SSergey Zigachev 		}
2987b843c749SSergey Zigachev 	}
2988b843c749SSergey Zigachev 	return asic_hang;
2989b843c749SSergey Zigachev }
2990b843c749SSergey Zigachev 
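/*
 * Editor's note: a hedged sketch of how an individual IP block feeds the
 * walk above.  The amd_ip_funcs field names match the callbacks used in
 * this file; "foo" and foo_engine_is_hung() are hypothetical placeholders,
 * not functions that exist in the driver.
 */
#if 0	/* illustrative sketch, not compiled */
static bool foo_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* returning true marks this block as hung for the reset path */
	return foo_engine_is_hung(adev);	/* hypothetical helper */
}

static const struct amd_ip_funcs foo_ip_funcs = {
	.name = "foo",
	.check_soft_reset = foo_check_soft_reset,
	/* .pre_soft_reset, .soft_reset and .post_soft_reset hook in the same way */
};
#endif
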
2991b843c749SSergey Zigachev /**
2992b843c749SSergey Zigachev  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
2993b843c749SSergey Zigachev  *
2994b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
2995b843c749SSergey Zigachev  *
2996b843c749SSergey Zigachev  * The list of all the hardware IPs that make up the asic is walked and the
2997b843c749SSergey Zigachev  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
2998b843c749SSergey Zigachev  * handles any IP specific hardware or software state changes that are
2999b843c749SSergey Zigachev  * necessary for a soft reset to succeed.
3000b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
3001b843c749SSergey Zigachev  */
3002b843c749SSergey Zigachev static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
3003b843c749SSergey Zigachev {
3004b843c749SSergey Zigachev 	int i, r = 0;
3005b843c749SSergey Zigachev 
3006b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
3007b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
3008b843c749SSergey Zigachev 			continue;
3009b843c749SSergey Zigachev 		if (adev->ip_blocks[i].status.hang &&
3010b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
3011b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
3012b843c749SSergey Zigachev 			if (r)
3013b843c749SSergey Zigachev 				return r;
3014b843c749SSergey Zigachev 		}
3015b843c749SSergey Zigachev 	}
3016b843c749SSergey Zigachev 
3017b843c749SSergey Zigachev 	return 0;
3018b843c749SSergey Zigachev }
3019b843c749SSergey Zigachev 
3020b843c749SSergey Zigachev /**
3021b843c749SSergey Zigachev  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
3022b843c749SSergey Zigachev  *
3023b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
3024b843c749SSergey Zigachev  *
3025b843c749SSergey Zigachev  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
3026b843c749SSergey Zigachev  * reset is necessary to recover.
3027b843c749SSergey Zigachev  * Returns true if a full asic reset is required, false if not.
3028b843c749SSergey Zigachev  */
3029b843c749SSergey Zigachev static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
3030b843c749SSergey Zigachev {
3031b843c749SSergey Zigachev 	int i;
3032b843c749SSergey Zigachev 
3033b843c749SSergey Zigachev 	if (amdgpu_asic_need_full_reset(adev))
3034b843c749SSergey Zigachev 		return true;
3035b843c749SSergey Zigachev 
3036b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
3037b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
3038b843c749SSergey Zigachev 			continue;
3039b843c749SSergey Zigachev 		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
3040b843c749SSergey Zigachev 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
3041b843c749SSergey Zigachev 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
3042b843c749SSergey Zigachev 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
3043b843c749SSergey Zigachev 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3044b843c749SSergey Zigachev 			if (adev->ip_blocks[i].status.hang) {
3045b843c749SSergey Zigachev 				DRM_INFO("Some blocks need full reset!\n");
3046b843c749SSergey Zigachev 				return true;
3047b843c749SSergey Zigachev 			}
3048b843c749SSergey Zigachev 		}
3049b843c749SSergey Zigachev 	}
3050b843c749SSergey Zigachev 	return false;
3051b843c749SSergey Zigachev }
3052b843c749SSergey Zigachev 
3053b843c749SSergey Zigachev /**
3054b843c749SSergey Zigachev  * amdgpu_device_ip_soft_reset - do a soft reset
3055b843c749SSergey Zigachev  *
3056b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
3057b843c749SSergey Zigachev  *
3058b843c749SSergey Zigachev  * The list of all the hardware IPs that make up the asic is walked and the
3059b843c749SSergey Zigachev  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
3060b843c749SSergey Zigachev  * IP specific hardware or software state changes that are necessary to soft
3061b843c749SSergey Zigachev  * reset the IP.
3062b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
3063b843c749SSergey Zigachev  */
3064b843c749SSergey Zigachev static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
3065b843c749SSergey Zigachev {
3066b843c749SSergey Zigachev 	int i, r = 0;
3067b843c749SSergey Zigachev 
3068b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
3069b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
3070b843c749SSergey Zigachev 			continue;
3071b843c749SSergey Zigachev 		if (adev->ip_blocks[i].status.hang &&
3072b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->funcs->soft_reset) {
3073b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
3074b843c749SSergey Zigachev 			if (r)
3075b843c749SSergey Zigachev 				return r;
3076b843c749SSergey Zigachev 		}
3077b843c749SSergey Zigachev 	}
3078b843c749SSergey Zigachev 
3079b843c749SSergey Zigachev 	return 0;
3080b843c749SSergey Zigachev }
3081b843c749SSergey Zigachev 
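/*
 * Editor's note: a hedged sketch of what a block-level soft_reset callback
 * typically does.  EXAMPLE_SOFT_RESET_REG and EXAMPLE_RESET_MASK are
 * placeholders, not real register names; only the RREG32/WREG32 accessors
 * and the assert/delay/de-assert pattern are taken from the driver.
 */
#if 0	/* illustrative sketch, not compiled */
static int foo_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp;

	/* assert the block's reset bit */
	tmp = RREG32(EXAMPLE_SOFT_RESET_REG);
	tmp |= EXAMPLE_RESET_MASK;
	WREG32(EXAMPLE_SOFT_RESET_REG, tmp);
	udelay(50);

	/* de-assert it and give the block time to come back */
	tmp &= ~EXAMPLE_RESET_MASK;
	WREG32(EXAMPLE_SOFT_RESET_REG, tmp);
	udelay(50);

	return 0;
}
#endif
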
3082b843c749SSergey Zigachev /**
3083b843c749SSergey Zigachev  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
3084b843c749SSergey Zigachev  *
3085b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
3086b843c749SSergey Zigachev  *
3087b843c749SSergey Zigachev  * The list of all the hardware IPs that make up the asic is walked and the
3088b843c749SSergey Zigachev  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
3089b843c749SSergey Zigachev  * handles any IP specific hardware or software state changes that are
3090b843c749SSergey Zigachev  * necessary after the IP has been soft reset.
3091b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
3092b843c749SSergey Zigachev  */
3093b843c749SSergey Zigachev static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
3094b843c749SSergey Zigachev {
3095b843c749SSergey Zigachev 	int i, r = 0;
3096b843c749SSergey Zigachev 
3097b843c749SSergey Zigachev 	for (i = 0; i < adev->num_ip_blocks; i++) {
3098b843c749SSergey Zigachev 		if (!adev->ip_blocks[i].status.valid)
3099b843c749SSergey Zigachev 			continue;
3100b843c749SSergey Zigachev 		if (adev->ip_blocks[i].status.hang &&
3101b843c749SSergey Zigachev 		    adev->ip_blocks[i].version->funcs->post_soft_reset)
3102b843c749SSergey Zigachev 			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
3103b843c749SSergey Zigachev 		if (r)
3104b843c749SSergey Zigachev 			return r;
3105b843c749SSergey Zigachev 	}
3106b843c749SSergey Zigachev 
3107b843c749SSergey Zigachev 	return 0;
3108b843c749SSergey Zigachev }
3109b843c749SSergey Zigachev 
3110b843c749SSergey Zigachev /**
3111b843c749SSergey Zigachev  * amdgpu_device_recover_vram_from_shadow - restore shadowed VRAM buffers
3112b843c749SSergey Zigachev  *
3113b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
3114b843c749SSergey Zigachev  * @ring: amdgpu_ring for the engine handling the buffer operations
3115b843c749SSergey Zigachev  * @bo: amdgpu_bo buffer whose shadow is being restored
3116b843c749SSergey Zigachev  * @fence: dma_fence associated with the operation
3117b843c749SSergey Zigachev  *
3118b843c749SSergey Zigachev  * Restores the VRAM buffer contents from the shadow in GTT.  Used to
3119b843c749SSergey Zigachev  * restore things like GPUVM page tables after a GPU reset where
3120b843c749SSergey Zigachev  * the contents of VRAM might be lost.
3121b843c749SSergey Zigachev  * Returns 0 on success, negative error code on failure.
3122b843c749SSergey Zigachev  */
3123b843c749SSergey Zigachev static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
3124b843c749SSergey Zigachev 						  struct amdgpu_ring *ring,
3125b843c749SSergey Zigachev 						  struct amdgpu_bo *bo,
3126b843c749SSergey Zigachev 						  struct dma_fence **fence)
3127b843c749SSergey Zigachev {
3128b843c749SSergey Zigachev 	uint32_t domain;
3129b843c749SSergey Zigachev 	int r;
3130b843c749SSergey Zigachev 
3131b843c749SSergey Zigachev 	if (!bo->shadow)
3132b843c749SSergey Zigachev 		return 0;
3133b843c749SSergey Zigachev 
3134b843c749SSergey Zigachev 	r = amdgpu_bo_reserve(bo, true);
3135b843c749SSergey Zigachev 	if (r)
3136b843c749SSergey Zigachev 		return r;
3137b843c749SSergey Zigachev 	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
3138b843c749SSergey Zigachev 	/* if bo has been evicted, then no need to recover */
3139b843c749SSergey Zigachev 	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
3140b843c749SSergey Zigachev 		r = amdgpu_bo_validate(bo->shadow);
3141b843c749SSergey Zigachev 		if (r) {
3142b843c749SSergey Zigachev 			DRM_ERROR("bo validate failed!\n");
3143b843c749SSergey Zigachev 			goto err;
3144b843c749SSergey Zigachev 		}
3145b843c749SSergey Zigachev 
3146b843c749SSergey Zigachev 		r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
3147b843c749SSergey Zigachev 						 NULL, fence, true);
3148b843c749SSergey Zigachev 		if (r) {
3149b843c749SSergey Zigachev 			DRM_ERROR("recover page table failed!\n");
3150b843c749SSergey Zigachev 			goto err;
3151b843c749SSergey Zigachev 		}
3152b843c749SSergey Zigachev 	}
3153b843c749SSergey Zigachev err:
3154b843c749SSergey Zigachev 	amdgpu_bo_unreserve(bo);
3155b843c749SSergey Zigachev 	return r;
3156b843c749SSergey Zigachev }
3157b843c749SSergey Zigachev 
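/*
 * Editor's note: restoring from shadow only works for buffers that were
 * created with a GTT shadow in the first place.  A hedged sketch follows;
 * AMDGPU_GEM_CREATE_SHADOW is the driver flag that requests the shadow,
 * while the exact BO-creation interface shown here is illustrative and may
 * differ between driver revisions.
 */
#if 0	/* illustrative sketch, not compiled */
	struct amdgpu_bo_param bp = {
		.size = AMDGPU_GPU_PAGE_SIZE,
		.byte_align = AMDGPU_GPU_PAGE_SIZE,
		.domain = AMDGPU_GEM_DOMAIN_VRAM,
		.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
			 AMDGPU_GEM_CREATE_SHADOW,	/* keep a GTT shadow copy */
		.type = ttm_bo_type_kernel,
	};
	struct amdgpu_bo *bo;
	int r = amdgpu_bo_create(adev, &bp, &bo);
#endif
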
3158b843c749SSergey Zigachev /**
3159b843c749SSergey Zigachev  * amdgpu_device_handle_vram_lost - Handle the loss of VRAM contents
3160b843c749SSergey Zigachev  *
3161b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
3162b843c749SSergey Zigachev  *
3163b843c749SSergey Zigachev  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
3164b843c749SSergey Zigachev  * restore things like GPUVM page tables after a GPU reset where
3165b843c749SSergey Zigachev  * the contents of VRAM might be lost.
3166b843c749SSergey Zigachev  * Returns 0 on success, 1 on failure.
3167b843c749SSergey Zigachev  */
3168b843c749SSergey Zigachev static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
3169b843c749SSergey Zigachev {
3170b843c749SSergey Zigachev 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
3171b843c749SSergey Zigachev 	struct amdgpu_bo *bo, *tmp;
3172b843c749SSergey Zigachev 	struct dma_fence *fence = NULL, *next = NULL;
3173b843c749SSergey Zigachev 	long r = 1;
3174b843c749SSergey Zigachev 	int i = 0;
3175b843c749SSergey Zigachev 	long tmo;
3176b843c749SSergey Zigachev 
3177b843c749SSergey Zigachev 	if (amdgpu_sriov_runtime(adev))
3178b843c749SSergey Zigachev 		tmo = msecs_to_jiffies(8000);
3179b843c749SSergey Zigachev 	else
3180b843c749SSergey Zigachev 		tmo = msecs_to_jiffies(100);
3181b843c749SSergey Zigachev 
3182b843c749SSergey Zigachev 	DRM_INFO("recover vram bo from shadow start\n");
3183b843c749SSergey Zigachev 	mutex_lock(&adev->shadow_list_lock);
3184b843c749SSergey Zigachev 	list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
3185b843c749SSergey Zigachev 		next = NULL;
3186b843c749SSergey Zigachev 		amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
3187b843c749SSergey Zigachev 		if (fence) {
3188b843c749SSergey Zigachev 			r = dma_fence_wait_timeout(fence, false, tmo);
3189b843c749SSergey Zigachev 			if (r == 0)
3190b843c749SSergey Zigachev 				pr_err("wait fence %p[%d] timeout\n", fence, i);
3191b843c749SSergey Zigachev 			else if (r < 0)
3192b843c749SSergey Zigachev 				pr_err("wait fence %p[%d] interrupted\n", fence, i);
3193b843c749SSergey Zigachev 			if (r < 1) {
3194b843c749SSergey Zigachev 				dma_fence_put(fence);
3195b843c749SSergey Zigachev 				fence = next;
3196b843c749SSergey Zigachev 				break;
3197b843c749SSergey Zigachev 			}
3198b843c749SSergey Zigachev 			i++;
3199b843c749SSergey Zigachev 		}
3200b843c749SSergey Zigachev 
3201b843c749SSergey Zigachev 		dma_fence_put(fence);
3202b843c749SSergey Zigachev 		fence = next;
3203b843c749SSergey Zigachev 	}
3204b843c749SSergey Zigachev 	mutex_unlock(&adev->shadow_list_lock);
3205b843c749SSergey Zigachev 
3206b843c749SSergey Zigachev 	if (fence) {
3207b843c749SSergey Zigachev 		r = dma_fence_wait_timeout(fence, false, tmo);
3208b843c749SSergey Zigachev 		if (r == 0)
3209b843c749SSergey Zigachev 			pr_err("wait fence %p[%d] timeout\n", fence, i);
3210b843c749SSergey Zigachev 		else if (r < 0)
3211b843c749SSergey Zigachev 			pr_err("wait fence %p[%d] interrupted\n", fence, i);
3212b843c749SSergey Zigachev 
3213b843c749SSergey Zigachev 	}
3214b843c749SSergey Zigachev 	dma_fence_put(fence);
3215b843c749SSergey Zigachev 
3216b843c749SSergey Zigachev 	if (r > 0)
3217b843c749SSergey Zigachev 		DRM_INFO("recover vram bo from shadow done\n");
3218b843c749SSergey Zigachev 	else
3219b843c749SSergey Zigachev 		DRM_ERROR("recover vram bo from shadow failed\n");
3220b843c749SSergey Zigachev 
3221b843c749SSergey Zigachev 	return (r > 0) ? 0 : 1;
3222b843c749SSergey Zigachev }
3223b843c749SSergey Zigachev 
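/*
 * Editor's note: bumping adev->vram_lost_counter (here and in the reset
 * paths below) is what lets user space notice that its VRAM contents are
 * gone.  A hedged user-space sketch via libdrm follows; cached_count and
 * recreate_gpu_resources() are hypothetical application-side names.
 */
#if 0	/* illustrative sketch (user-space, libdrm), not compiled */
	uint32_t count;

	/* AMDGPU_INFO_VRAM_LOST_COUNTER is defined in amdgpu_drm.h */
	if (amdgpu_query_info(dev, AMDGPU_INFO_VRAM_LOST_COUNTER,
			      sizeof(count), &count) == 0 &&
	    count != cached_count) {
		cached_count = count;
		recreate_gpu_resources();	/* hypothetical application hook */
	}
#endif
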
3224b843c749SSergey Zigachev /**
3225b843c749SSergey Zigachev  * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
3226b843c749SSergey Zigachev  *
3227b843c749SSergey Zigachev  * @adev: amdgpu device pointer
3228b843c749SSergey Zigachev  *
3229b843c749SSergey Zigachev  * Attempt a soft reset or, if needed, a full reset and reinitialize the ASIC.
3230b843c749SSergey Zigachev  * Returns 0 if the reset succeeded, otherwise an error code.
3231b843c749SSergey Zigachev  */
3232b843c749SSergey Zigachev static int amdgpu_device_reset(struct amdgpu_device *adev)
3233b843c749SSergey Zigachev {
3234b843c749SSergey Zigachev 	bool need_full_reset, vram_lost = 0;
3235b843c749SSergey Zigachev 	int r;
3236b843c749SSergey Zigachev 
3237b843c749SSergey Zigachev 	need_full_reset = amdgpu_device_ip_need_full_reset(adev);
3238b843c749SSergey Zigachev 
3239b843c749SSergey Zigachev 	if (!need_full_reset) {
3240b843c749SSergey Zigachev 		amdgpu_device_ip_pre_soft_reset(adev);
3241b843c749SSergey Zigachev 		r = amdgpu_device_ip_soft_reset(adev);
3242b843c749SSergey Zigachev 		amdgpu_device_ip_post_soft_reset(adev);
3243b843c749SSergey Zigachev 		if (r || amdgpu_device_ip_check_soft_reset(adev)) {
3244b843c749SSergey Zigachev 			DRM_INFO("soft reset failed, will fallback to full reset!\n");
3245b843c749SSergey Zigachev 			need_full_reset = true;
3246b843c749SSergey Zigachev 		}
3247b843c749SSergey Zigachev 	}
3248b843c749SSergey Zigachev 
3249b843c749SSergey Zigachev 	if (need_full_reset) {
3250b843c749SSergey Zigachev 		r = amdgpu_device_ip_suspend(adev);
3251b843c749SSergey Zigachev 
3252b843c749SSergey Zigachev retry:
3253b843c749SSergey Zigachev 		r = amdgpu_asic_reset(adev);
3254b843c749SSergey Zigachev 		/* post card */
3255b843c749SSergey Zigachev 		amdgpu_atom_asic_init(adev->mode_info.atom_context);
3256b843c749SSergey Zigachev 
3257b843c749SSergey Zigachev 		if (!r) {
3258b843c749SSergey Zigachev 			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
3259b843c749SSergey Zigachev 			r = amdgpu_device_ip_resume_phase1(adev);
3260b843c749SSergey Zigachev 			if (r)
3261b843c749SSergey Zigachev 				goto out;
3262b843c749SSergey Zigachev 
3263b843c749SSergey Zigachev 			vram_lost = amdgpu_device_check_vram_lost(adev);
3264b843c749SSergey Zigachev 			if (vram_lost) {
3265b843c749SSergey Zigachev 				DRM_ERROR("VRAM is lost!\n");
3266b843c749SSergey Zigachev 				atomic_inc(&adev->vram_lost_counter);
3267b843c749SSergey Zigachev 			}
3268b843c749SSergey Zigachev 
3269b843c749SSergey Zigachev 			r = amdgpu_gtt_mgr_recover(
3270b843c749SSergey Zigachev 				&adev->mman.bdev.man[TTM_PL_TT]);
3271b843c749SSergey Zigachev 			if (r)
3272b843c749SSergey Zigachev 				goto out;
3273b843c749SSergey Zigachev 
3274b843c749SSergey Zigachev 			r = amdgpu_device_ip_resume_phase2(adev);
3275b843c749SSergey Zigachev 			if (r)
3276b843c749SSergey Zigachev 				goto out;
3277b843c749SSergey Zigachev 
3278b843c749SSergey Zigachev 			if (vram_lost)
3279b843c749SSergey Zigachev 				amdgpu_device_fill_reset_magic(adev);
3280b843c749SSergey Zigachev 		}
3281b843c749SSergey Zigachev 	}
3282b843c749SSergey Zigachev 
3283b843c749SSergey Zigachev out:
3284b843c749SSergey Zigachev 	if (!r) {
3285b843c749SSergey Zigachev 		amdgpu_irq_gpu_reset_resume_helper(adev);
3286b843c749SSergey Zigachev 		r = amdgpu_ib_ring_tests(adev);
3287b843c749SSergey Zigachev 		if (r) {
3288b843c749SSergey Zigachev 			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
3289b843c749SSergey Zigachev 			r = amdgpu_device_ip_suspend(adev);
3290b843c749SSergey Zigachev 			need_full_reset = true;
3291b843c749SSergey Zigachev 			goto retry;
3292b843c749SSergey Zigachev 		}
3293b843c749SSergey Zigachev 	}
3294b843c749SSergey Zigachev 
3295b843c749SSergey Zigachev 	if (!r && ((need_full_reset && !(adev->flags & AMD_IS_APU)) || vram_lost))
3296b843c749SSergey Zigachev 		r = amdgpu_device_handle_vram_lost(adev);
3297b843c749SSergey Zigachev 
3298b843c749SSergey Zigachev 	return r;
3299b843c749SSergey Zigachev }
3300b843c749SSergey Zigachev 
3301b843c749SSergey Zigachev /**
3302b843c749SSergey Zigachev  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
3303b843c749SSergey Zigachev  *
3304b843c749SSergey Zigachev  * @adev: amdgpu device pointer
3305b843c749SSergey Zigachev  * @from_hypervisor: request from hypervisor
3306b843c749SSergey Zigachev  *
3307b843c749SSergey Zigachev  * Perform a VF FLR (function level reset) and reinitialize the ASIC.
3308b843c749SSergey Zigachev  * Returns 0 if the reset succeeded, otherwise an error code.
3309b843c749SSergey Zigachev  */
3310b843c749SSergey Zigachev static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3311b843c749SSergey Zigachev 				     bool from_hypervisor)
3312b843c749SSergey Zigachev {
3313b843c749SSergey Zigachev 	int r;
3314b843c749SSergey Zigachev 
3315b843c749SSergey Zigachev 	if (from_hypervisor)
3316b843c749SSergey Zigachev 		r = amdgpu_virt_request_full_gpu(adev, true);
3317b843c749SSergey Zigachev 	else
3318b843c749SSergey Zigachev 		r = amdgpu_virt_reset_gpu(adev);
3319b843c749SSergey Zigachev 	if (r)
3320b843c749SSergey Zigachev 		return r;
3321b843c749SSergey Zigachev 
3322b843c749SSergey Zigachev 	/* Resume IP prior to SMC */
3323b843c749SSergey Zigachev 	r = amdgpu_device_ip_reinit_early_sriov(adev);
3324b843c749SSergey Zigachev 	if (r)
3325b843c749SSergey Zigachev 		goto error;
3326b843c749SSergey Zigachev 
3327b843c749SSergey Zigachev 	/* we need to recover the GART before resuming SMC/CP/SDMA */
3328b843c749SSergey Zigachev 	amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
3329b843c749SSergey Zigachev 
3330b843c749SSergey Zigachev 	/* now we are okay to resume SMC/CP/SDMA */
3331b843c749SSergey Zigachev 	r = amdgpu_device_ip_reinit_late_sriov(adev);
3332b843c749SSergey Zigachev 	if (r)
3333b843c749SSergey Zigachev 		goto error;
3334b843c749SSergey Zigachev 
3335b843c749SSergey Zigachev 	amdgpu_irq_gpu_reset_resume_helper(adev);
3336b843c749SSergey Zigachev 	r = amdgpu_ib_ring_tests(adev);
3337b843c749SSergey Zigachev 
3338b843c749SSergey Zigachev error:
3339b843c749SSergey Zigachev 	amdgpu_virt_init_data_exchange(adev);
3340b843c749SSergey Zigachev 	amdgpu_virt_release_full_gpu(adev, true);
3341b843c749SSergey Zigachev 	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
3342b843c749SSergey Zigachev 		atomic_inc(&adev->vram_lost_counter);
3343b843c749SSergey Zigachev 		r = amdgpu_device_handle_vram_lost(adev);
3344b843c749SSergey Zigachev 	}
3345b843c749SSergey Zigachev 
3346b843c749SSergey Zigachev 	return r;
3347b843c749SSergey Zigachev }
3348b843c749SSergey Zigachev 
3349b843c749SSergey Zigachev /**
3350b843c749SSergey Zigachev  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
3351b843c749SSergey Zigachev  *
3352b843c749SSergey Zigachev  * @adev: amdgpu device pointer
3353b843c749SSergey Zigachev  * @job: which job trigger hang
3354b843c749SSergey Zigachev  * @force: forces reset regardless of amdgpu_gpu_recovery
3355b843c749SSergey Zigachev  *
3356b843c749SSergey Zigachev  * Attempt to reset the GPU if it has hung (all asics).
3357b843c749SSergey Zigachev  * Returns 0 for success or an error on failure.
3358b843c749SSergey Zigachev  */
3359b843c749SSergey Zigachev int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
3360b843c749SSergey Zigachev 			      struct amdgpu_job *job, bool force)
3361b843c749SSergey Zigachev {
3362b843c749SSergey Zigachev 	int i, r, resched;
3363b843c749SSergey Zigachev 
3364b843c749SSergey Zigachev 	if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
3365b843c749SSergey Zigachev 		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
3366b843c749SSergey Zigachev 		return 0;
3367b843c749SSergey Zigachev 	}
3368b843c749SSergey Zigachev 
3369b843c749SSergey Zigachev 	if (!force && (amdgpu_gpu_recovery == 0 ||
3370b843c749SSergey Zigachev 			(amdgpu_gpu_recovery == -1  && !amdgpu_sriov_vf(adev)))) {
3371b843c749SSergey Zigachev 		DRM_INFO("GPU recovery disabled.\n");
3372b843c749SSergey Zigachev 		return 0;
3373b843c749SSergey Zigachev 	}
3374b843c749SSergey Zigachev 
3375b843c749SSergey Zigachev 	dev_info(adev->dev, "GPU reset begin!\n");
3376b843c749SSergey Zigachev 
3377b843c749SSergey Zigachev 	mutex_lock(&adev->lock_reset);
3378b843c749SSergey Zigachev 	atomic_inc(&adev->gpu_reset_counter);
3379b843c749SSergey Zigachev 	adev->in_gpu_reset = 1;
3380b843c749SSergey Zigachev 
3381b843c749SSergey Zigachev 	/* Block kfd */
3382b843c749SSergey Zigachev 	amdgpu_amdkfd_pre_reset(adev);
3383b843c749SSergey Zigachev 
3384b843c749SSergey Zigachev 	/* block TTM */
3385b843c749SSergey Zigachev 	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3386b843c749SSergey Zigachev 
3387b843c749SSergey Zigachev 	/* block all schedulers and reset given job's ring */
3388b843c749SSergey Zigachev 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3389b843c749SSergey Zigachev 		struct amdgpu_ring *ring = adev->rings[i];
3390b843c749SSergey Zigachev 
3391b843c749SSergey Zigachev 		if (!ring || !ring->sched.thread)
3392b843c749SSergey Zigachev 			continue;
3393b843c749SSergey Zigachev 
3394b843c749SSergey Zigachev 		kthread_park(ring->sched.thread);
3395b843c749SSergey Zigachev 
3396b843c749SSergey Zigachev 		if (job && job->base.sched == &ring->sched)
3397b843c749SSergey Zigachev 			continue;
3398b843c749SSergey Zigachev 
3399b843c749SSergey Zigachev 		drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
3400b843c749SSergey Zigachev 
3401b843c749SSergey Zigachev 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
3402b843c749SSergey Zigachev 		amdgpu_fence_driver_force_completion(ring);
3403b843c749SSergey Zigachev 	}
3404b843c749SSergey Zigachev 
3405b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev))
3406b843c749SSergey Zigachev 		r = amdgpu_device_reset_sriov(adev, job ? false : true);
3407b843c749SSergey Zigachev 	else
3408b843c749SSergey Zigachev 		r = amdgpu_device_reset(adev);
3409b843c749SSergey Zigachev 
3410b843c749SSergey Zigachev 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3411b843c749SSergey Zigachev 		struct amdgpu_ring *ring = adev->rings[i];
3412b843c749SSergey Zigachev 
3413b843c749SSergey Zigachev 		if (!ring || !ring->sched.thread)
3414b843c749SSergey Zigachev 			continue;
3415b843c749SSergey Zigachev 
3416b843c749SSergey Zigachev 		/* only need to recover the scheduler of the given job's ring,
3417b843c749SSergey Zigachev 		 * or of all rings (when @job is NULL),
3418b843c749SSergey Zigachev 		 * after the reset above has completed
3419b843c749SSergey Zigachev 		 */
3420b843c749SSergey Zigachev 		if ((!job || job->base.sched == &ring->sched) && !r)
3421b843c749SSergey Zigachev 			drm_sched_job_recovery(&ring->sched);
3422b843c749SSergey Zigachev 
3423b843c749SSergey Zigachev 		kthread_unpark(ring->sched.thread);
3424b843c749SSergey Zigachev 	}
3425b843c749SSergey Zigachev 
3426b843c749SSergey Zigachev 	if (!amdgpu_device_has_dc_support(adev)) {
3427b843c749SSergey Zigachev 		drm_helper_resume_force_mode(adev->ddev);
3428b843c749SSergey Zigachev 	}
3429b843c749SSergey Zigachev 
3430b843c749SSergey Zigachev 	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
3431b843c749SSergey Zigachev 
3432b843c749SSergey Zigachev 	if (r) {
3433b843c749SSergey Zigachev 		/* bad news, how to tell it to userspace ? */
3434b843c749SSergey Zigachev 		dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
3435b843c749SSergey Zigachev 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
3436b843c749SSergey Zigachev 	} else {
3437b843c749SSergey Zigachev 		dev_info(adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
3438b843c749SSergey Zigachev 	}
3439b843c749SSergey Zigachev 
3440b843c749SSergey Zigachev 	/* unlock kfd */
3441b843c749SSergey Zigachev 	amdgpu_amdkfd_post_reset(adev);
3442b843c749SSergey Zigachev 	amdgpu_vf_error_trans_all(adev);
3443b843c749SSergey Zigachev 	adev->in_gpu_reset = 0;
3444b843c749SSergey Zigachev 	mutex_unlock(&adev->lock_reset);
3445b843c749SSergey Zigachev 	return r;
3446b843c749SSergey Zigachev }
3447b843c749SSergey Zigachev 
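/*
 * Editor's note: the usual caller of amdgpu_device_gpu_recover() is the GPU
 * scheduler's job-timeout handler in amdgpu_job.c.  A hedged sketch follows;
 * it assumes the to_amdgpu_ring()/to_amdgpu_job() helpers used elsewhere in
 * the driver and is not the authoritative handler.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	/* a stuck job on this ring kicks off the recovery path above */
	amdgpu_device_gpu_recover(ring->adev, job, false);
}
#endif
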
3448b843c749SSergey Zigachev /**
3449b843c749SSergey Zigachev  * amdgpu_device_get_pcie_info - fetch PCIE info about the PCIE slot
3450b843c749SSergey Zigachev  *
3451b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
3452b843c749SSergey Zigachev  *
3453b843c749SSergey Zigachev  * Fetches and stores in the driver the PCIE capabilities (gen speed
3454b843c749SSergey Zigachev  * and lanes) of the slot the device is in. Handles APUs and
3455b843c749SSergey Zigachev  * virtualized environments where PCIE config space may not be available.
3456b843c749SSergey Zigachev  */
3457b843c749SSergey Zigachev static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
3458b843c749SSergey Zigachev {
3459b843c749SSergey Zigachev 	struct pci_dev *pdev;
3460b843c749SSergey Zigachev 	enum pci_bus_speed speed_cap;
3461b843c749SSergey Zigachev 	enum pcie_link_width link_width;
3462b843c749SSergey Zigachev 
3463b843c749SSergey Zigachev 	if (amdgpu_pcie_gen_cap)
3464b843c749SSergey Zigachev 		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
3465b843c749SSergey Zigachev 
3466b843c749SSergey Zigachev 	if (amdgpu_pcie_lane_cap)
3467b843c749SSergey Zigachev 		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
3468b843c749SSergey Zigachev 
3469b843c749SSergey Zigachev 	/* covers APUs as well */
347078973132SSergey Zigachev #if 0
3471b843c749SSergey Zigachev 	if (pci_is_root_bus(adev->pdev->bus)) {
347278973132SSergey Zigachev #endif
3473b843c749SSergey Zigachev 		if (adev->pm.pcie_gen_mask == 0)
3474b843c749SSergey Zigachev 			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3475b843c749SSergey Zigachev 		if (adev->pm.pcie_mlw_mask == 0)
3476b843c749SSergey Zigachev 			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
3477b843c749SSergey Zigachev 		return;
347878973132SSergey Zigachev #if 0	/* pci_is_root_bus() */
3479b843c749SSergey Zigachev  	}
348078973132SSergey Zigachev #endif
3481b843c749SSergey Zigachev 
3482b843c749SSergey Zigachev 	if (adev->pm.pcie_gen_mask == 0) {
3483b843c749SSergey Zigachev 		/* asic caps */
3484b843c749SSergey Zigachev 		pdev = adev->pdev;
3485b843c749SSergey Zigachev 		speed_cap = pcie_get_speed_cap(pdev);
3486b843c749SSergey Zigachev 		if (speed_cap == PCI_SPEED_UNKNOWN) {
3487b843c749SSergey Zigachev 			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3488b843c749SSergey Zigachev 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3489b843c749SSergey Zigachev 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
3490b843c749SSergey Zigachev 		} else {
3491b843c749SSergey Zigachev 			if (speed_cap == PCIE_SPEED_16_0GT)
3492b843c749SSergey Zigachev 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3493b843c749SSergey Zigachev 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3494b843c749SSergey Zigachev 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
3495b843c749SSergey Zigachev 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
3496b843c749SSergey Zigachev 			else if (speed_cap == PCIE_SPEED_8_0GT)
3497b843c749SSergey Zigachev 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3498b843c749SSergey Zigachev 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3499b843c749SSergey Zigachev 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
3500b843c749SSergey Zigachev 			else if (speed_cap == PCIE_SPEED_5_0GT)
3501b843c749SSergey Zigachev 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3502b843c749SSergey Zigachev 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
3503b843c749SSergey Zigachev 			else
3504b843c749SSergey Zigachev 				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
3505b843c749SSergey Zigachev 		}
3506b843c749SSergey Zigachev 		/* platform caps */
3507b843c749SSergey Zigachev 		pdev = adev->ddev->pdev->bus->self;
3508b843c749SSergey Zigachev 		speed_cap = pcie_get_speed_cap(pdev);
3509b843c749SSergey Zigachev 		if (speed_cap == PCI_SPEED_UNKNOWN) {
3510b843c749SSergey Zigachev 			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3511b843c749SSergey Zigachev 						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
3512b843c749SSergey Zigachev 		} else {
3513b843c749SSergey Zigachev 			if (speed_cap == PCIE_SPEED_16_0GT)
3514b843c749SSergey Zigachev 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3515b843c749SSergey Zigachev 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3516b843c749SSergey Zigachev 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
3517b843c749SSergey Zigachev 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
3518b843c749SSergey Zigachev 			else if (speed_cap == PCIE_SPEED_8_0GT)
3519b843c749SSergey Zigachev 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3520b843c749SSergey Zigachev 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3521b843c749SSergey Zigachev 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
3522b843c749SSergey Zigachev 			else if (speed_cap == PCIE_SPEED_5_0GT)
3523b843c749SSergey Zigachev 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3524b843c749SSergey Zigachev 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
3525b843c749SSergey Zigachev 			else
3526b843c749SSergey Zigachev 				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
3527b843c749SSergey Zigachev 
3528b843c749SSergey Zigachev 		}
3529b843c749SSergey Zigachev 	}
3530b843c749SSergey Zigachev 	if (adev->pm.pcie_mlw_mask == 0) {
3531b843c749SSergey Zigachev 		pdev = adev->ddev->pdev->bus->self;
3532b843c749SSergey Zigachev 		link_width = pcie_get_width_cap(pdev);
3533b843c749SSergey Zigachev 		if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
3534b843c749SSergey Zigachev 			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
3535b843c749SSergey Zigachev 		} else {
3536b843c749SSergey Zigachev 			switch (link_width) {
3537b843c749SSergey Zigachev 			case PCIE_LNK_X32:
3538b843c749SSergey Zigachev 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
3539b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3540b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3541b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3542b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3543b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3544b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3545b843c749SSergey Zigachev 				break;
3546b843c749SSergey Zigachev 			case PCIE_LNK_X16:
3547b843c749SSergey Zigachev 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3548b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3549b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3550b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3551b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3552b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3553b843c749SSergey Zigachev 				break;
3554b843c749SSergey Zigachev 			case PCIE_LNK_X12:
3555b843c749SSergey Zigachev 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3556b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3557b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3558b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3559b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3560b843c749SSergey Zigachev 				break;
3561b843c749SSergey Zigachev 			case PCIE_LNK_X8:
3562b843c749SSergey Zigachev 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3563b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3564b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3565b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3566b843c749SSergey Zigachev 				break;
3567b843c749SSergey Zigachev 			case PCIE_LNK_X4:
3568b843c749SSergey Zigachev 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3569b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3570b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3571b843c749SSergey Zigachev 				break;
3572b843c749SSergey Zigachev 			case PCIE_LNK_X2:
3573b843c749SSergey Zigachev 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3574b843c749SSergey Zigachev 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3575b843c749SSergey Zigachev 				break;
3576b843c749SSergey Zigachev 			case PCIE_LNK_X1:
3577b843c749SSergey Zigachev 				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
3578b843c749SSergey Zigachev 				break;
3579b843c749SSergey Zigachev 			default:
3580b843c749SSergey Zigachev 				break;
3581b843c749SSergey Zigachev 			}
3582b843c749SSergey Zigachev 		}
3583b843c749SSergey Zigachev 	}
3584b843c749SSergey Zigachev }
3585b843c749SSergey Zigachev 
3586