xref: /openbsd-src/sys/dev/pci/drm/amd/amdgpu/amdgpu_device.c (revision 3374c67d44f9b75b98444cbf63020f777792342e)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33 #include <linux/iommu.h>
34 #include <linux/pci.h>
35 #include <linux/devcoredump.h>
36 #include <generated/utsrelease.h>
37 #include <linux/pci-p2pdma.h>
38 
39 #include <drm/drm_atomic_helper.h>
40 #include <drm/drm_probe_helper.h>
41 #include <drm/amdgpu_drm.h>
42 #include <linux/vgaarb.h>
43 #include <linux/vga_switcheroo.h>
44 #include <linux/efi.h>
45 #include "amdgpu.h"
46 #include "amdgpu_trace.h"
47 #include "amdgpu_i2c.h"
48 #include "atom.h"
49 #include "amdgpu_atombios.h"
50 #include "amdgpu_atomfirmware.h"
51 #include "amd_pcie.h"
52 #ifdef CONFIG_DRM_AMDGPU_SI
53 #include "si.h"
54 #endif
55 #ifdef CONFIG_DRM_AMDGPU_CIK
56 #include "cik.h"
57 #endif
58 #include "vi.h"
59 #include "soc15.h"
60 #include "nv.h"
61 #include "bif/bif_4_1_d.h"
62 #include <linux/firmware.h>
63 #include "amdgpu_vf_error.h"
64 
65 #include "amdgpu_amdkfd.h"
66 #include "amdgpu_pm.h"
67 
68 #include "amdgpu_xgmi.h"
69 #include "amdgpu_ras.h"
70 #include "amdgpu_pmu.h"
71 #include "amdgpu_fru_eeprom.h"
72 #include "amdgpu_reset.h"
73 
74 #include <linux/suspend.h>
75 #include <drm/task_barrier.h>
76 #include <linux/pm_runtime.h>
77 
78 #include <drm/drm_drv.h>
79 
80 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
84 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
85 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
86 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
87 
88 #define AMDGPU_RESUME_MS		2000
89 #define AMDGPU_MAX_RETRY_LIMIT		2
90 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
91 
92 const char *amdgpu_asic_name[] = {
93 	"TAHITI",
94 	"PITCAIRN",
95 	"VERDE",
96 	"OLAND",
97 	"HAINAN",
98 	"BONAIRE",
99 	"KAVERI",
100 	"KABINI",
101 	"HAWAII",
102 	"MULLINS",
103 	"TOPAZ",
104 	"TONGA",
105 	"FIJI",
106 	"CARRIZO",
107 	"STONEY",
108 	"POLARIS10",
109 	"POLARIS11",
110 	"POLARIS12",
111 	"VEGAM",
112 	"VEGA10",
113 	"VEGA12",
114 	"VEGA20",
115 	"RAVEN",
116 	"ARCTURUS",
117 	"RENOIR",
118 	"ALDEBARAN",
119 	"NAVI10",
120 	"CYAN_SKILLFISH",
121 	"NAVI14",
122 	"NAVI12",
123 	"SIENNA_CICHLID",
124 	"NAVY_FLOUNDER",
125 	"VANGOGH",
126 	"DIMGREY_CAVEFISH",
127 	"BEIGE_GOBY",
128 	"YELLOW_CARP",
129 	"IP DISCOVERY",
130 	"LAST",
131 };
132 
133 /**
134  * DOC: pcie_replay_count
135  *
136  * The amdgpu driver provides a sysfs API for reporting the total number
137  * of PCIe replays (NAKs).
138  * The file pcie_replay_count is used for this and returns the total
139  * number of replays as the sum of the NAKs generated and NAKs received.
140  */
141 
142 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
143 		struct device_attribute *attr, char *buf)
144 {
145 	struct drm_device *ddev = dev_get_drvdata(dev);
146 	struct amdgpu_device *adev = drm_to_adev(ddev);
147 	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
148 
149 	return sysfs_emit(buf, "%llu\n", cnt);
150 }
151 
152 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
153 		amdgpu_device_get_pcie_replay_count, NULL);
154 
155 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
156 
157 /**
158  * DOC: product_name
159  *
160  * The amdgpu driver provides a sysfs API for reporting the product name
161  * for the device.
162  * The file product_name is used for this and returns the product name
163  * as returned from the FRU.
164  * NOTE: This is only available for certain server cards
165  */
166 
167 static ssize_t amdgpu_device_get_product_name(struct device *dev,
168 		struct device_attribute *attr, char *buf)
169 {
170 	struct drm_device *ddev = dev_get_drvdata(dev);
171 	struct amdgpu_device *adev = drm_to_adev(ddev);
172 
173 	return sysfs_emit(buf, "%s\n", adev->product_name);
174 }
175 
176 static DEVICE_ATTR(product_name, S_IRUGO,
177 		amdgpu_device_get_product_name, NULL);
178 
179 /**
180  * DOC: product_number
181  *
182  * The amdgpu driver provides a sysfs API for reporting the part number
183  * for the device.
184  * The file product_number is used for this and returns the part number
185  * as returned from the FRU.
186  * NOTE: This is only available for certain server cards
187  */
188 
189 static ssize_t amdgpu_device_get_product_number(struct device *dev,
190 		struct device_attribute *attr, char *buf)
191 {
192 	struct drm_device *ddev = dev_get_drvdata(dev);
193 	struct amdgpu_device *adev = drm_to_adev(ddev);
194 
195 	return sysfs_emit(buf, "%s\n", adev->product_number);
196 }
197 
198 static DEVICE_ATTR(product_number, S_IRUGO,
199 		amdgpu_device_get_product_number, NULL);
200 
201 /**
202  * DOC: serial_number
203  *
204  * The amdgpu driver provides a sysfs API for reporting the serial number
205  * for the device.
206  * The file serial_number is used for this and returns the serial number
207  * as returned from the FRU.
208  * NOTE: This is only available for certain server cards
209  */
210 
211 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
212 		struct device_attribute *attr, char *buf)
213 {
214 	struct drm_device *ddev = dev_get_drvdata(dev);
215 	struct amdgpu_device *adev = drm_to_adev(ddev);
216 
217 	return sysfs_emit(buf, "%s\n", adev->serial);
218 }
219 
220 static DEVICE_ATTR(serial_number, S_IRUGO,
221 		amdgpu_device_get_serial_number, NULL);
222 
223 /**
224  * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
225  *
226  * @dev: drm_device pointer
227  *
228  * Returns true if the device is a dGPU with ATPX power control,
229  * otherwise returns false.
230  */
231 bool amdgpu_device_supports_px(struct drm_device *dev)
232 {
233 	struct amdgpu_device *adev = drm_to_adev(dev);
234 
235 	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
236 		return true;
237 	return false;
238 }
239 
240 /**
241  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
242  *
243  * @dev: drm_device pointer
244  *
245  * Returns true if the device is a dGPU with ACPI power control,
246  * otherwise returns false.
247  */
248 bool amdgpu_device_supports_boco(struct drm_device *dev)
249 {
250 	struct amdgpu_device *adev = drm_to_adev(dev);
251 
252 	if (adev->has_pr3 ||
253 	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
254 		return true;
255 	return false;
256 }
257 
258 /**
259  * amdgpu_device_supports_baco - Does the device support BACO
260  *
261  * @dev: drm_device pointer
262  *
263  * Returns true if the device supports BACO,
264  * otherwise returns false.
265  */
266 bool amdgpu_device_supports_baco(struct drm_device *dev)
267 {
268 	struct amdgpu_device *adev = drm_to_adev(dev);
269 
270 	return amdgpu_asic_supports_baco(adev);
271 }
272 
273 /**
274  * amdgpu_device_supports_smart_shift - Is the device dGPU with
275  * smart shift support
276  *
277  * @dev: drm_device pointer
278  *
279  * Returns true if the device is a dGPU with Smart Shift support,
280  * otherwise returns false.
281  */
282 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
283 {
284 	return (amdgpu_device_supports_boco(dev) &&
285 		amdgpu_acpi_is_power_shift_control_supported());
286 }
287 
288 /*
289  * VRAM access helper functions
290  */
291 
292 /**
293  * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
294  *
295  * @adev: amdgpu_device pointer
296  * @pos: offset of the buffer in vram
297  * @buf: virtual address of the buffer in system memory
298  * @size: read/write size; the buffer at @buf must be at least @size bytes
299  * @write: true - write to vram, otherwise - read from vram
300  */
301 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
302 			     void *buf, size_t size, bool write)
303 {
304 	unsigned long flags;
305 	uint32_t hi = ~0, tmp = 0;
306 	uint32_t *data = buf;
307 	uint64_t last;
308 	int idx;
309 
310 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
311 		return;
312 
313 	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
314 
315 	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
316 	for (last = pos + size; pos < last; pos += 4) {
317 		tmp = pos >> 31;
318 
319 		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
320 		if (tmp != hi) {
321 			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
322 			hi = tmp;
323 		}
324 		if (write)
325 			WREG32_NO_KIQ(mmMM_DATA, *data++);
326 		else
327 			*data++ = RREG32_NO_KIQ(mmMM_DATA);
328 	}
329 
330 	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
331 	drm_dev_exit(idx);
332 }
333 
334 /**
335  * amdgpu_device_aper_access - access vram via the vram aperture
336  *
337  * @adev: amdgpu_device pointer
338  * @pos: offset of the buffer in vram
339  * @buf: virtual address of the buffer in system memory
340  * @size: read/write size; the buffer at @buf must be at least @size bytes
341  * @write: true - write to vram, otherwise - read from vram
342  *
343  * Returns the number of bytes transferred.
344  */
345 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
346 				 void *buf, size_t size, bool write)
347 {
348 #ifdef CONFIG_64BIT
349 	void __iomem *addr;
350 	size_t count = 0;
351 	uint64_t last;
352 
353 	if (!adev->mman.aper_base_kaddr)
354 		return 0;
355 
356 	last = min(pos + size, adev->gmc.visible_vram_size);
357 	if (last > pos) {
358 		addr = adev->mman.aper_base_kaddr + pos;
359 		count = last - pos;
360 
361 		if (write) {
362 			memcpy_toio(addr, buf, count);
363 			mb();
364 			amdgpu_device_flush_hdp(adev, NULL);
365 		} else {
366 			amdgpu_device_invalidate_hdp(adev, NULL);
367 			mb();
368 			memcpy_fromio(buf, addr, count);
369 		}
370 
371 	}
372 
373 	return count;
374 #else
375 	return 0;
376 #endif
377 }
378 
379 /**
380  * amdgpu_device_vram_access - read/write a buffer in vram
381  *
382  * @adev: amdgpu_device pointer
383  * @pos: offset of the buffer in vram
384  * @buf: virtual address of the buffer in system memory
385  * @size: read/write size; the buffer at @buf must be at least @size bytes
386  * @write: true - write to vram, otherwise - read from vram
387  */
388 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
389 			       void *buf, size_t size, bool write)
390 {
391 	size_t count;
392 
393 	/* try using the vram aperture to access vram first */
394 	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
395 	size -= count;
396 	if (size) {
397 		/* use MM to access the rest of vram */
398 		pos += count;
399 		buf += count;
400 		amdgpu_device_mm_access(adev, pos, buf, size, write);
401 	}
402 }
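
/*
 * Illustrative sketch (not part of the driver): reading a single dword back
 * from VRAM, assuming @adev is a fully initialized device and @pos is a
 * dword aligned VRAM offset.
 *
 *	uint32_t val;
 *
 *	amdgpu_device_vram_access(adev, pos, &val, sizeof(val), false);
 *
 * The helper goes through the CPU visible aperture when the offset falls
 * inside it and falls back to MM_INDEX/MM_DATA for the remainder.
 */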
403 
404 /*
405  * register access helper functions.
406  */
407 
408 /* Check if hw access should be skipped because of hotplug or device error */
409 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
410 {
411 	if (adev->no_hw_access)
412 		return true;
413 
414 #ifdef CONFIG_LOCKDEP
415 	/*
416 	 * This is a bit complicated to understand, so worth a comment. What we assert
417 	 * here is that the GPU reset is not running on another thread in parallel.
418 	 *
419 	 * For this we trylock the read side of the reset semaphore, if that succeeds
420 	 * we know that the reset is not running in parallel.
421 	 *
422 	 * If the trylock fails we assert that we are either already holding the read
423 	 * side of the lock or are the reset thread itself and hold the write side of
424 	 * the lock.
425 	 */
426 	if (in_task()) {
427 		if (down_read_trylock(&adev->reset_domain->sem))
428 			up_read(&adev->reset_domain->sem);
429 		else
430 			lockdep_assert_held(&adev->reset_domain->sem);
431 	}
432 #endif
433 	return false;
434 }
435 
436 /**
437  * amdgpu_device_rreg - read a memory mapped IO or indirect register
438  *
439  * @adev: amdgpu_device pointer
440  * @reg: dword aligned register offset
441  * @acc_flags: access flags which require special behavior
442  *
443  * Returns the 32 bit value from the offset specified.
444  */
445 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
446 			    uint32_t reg, uint32_t acc_flags)
447 {
448 	uint32_t ret;
449 
450 	if (amdgpu_device_skip_hw_access(adev))
451 		return 0;
452 
453 	if ((reg * 4) < adev->rmmio_size) {
454 		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
455 		    amdgpu_sriov_runtime(adev) &&
456 		    down_read_trylock(&adev->reset_domain->sem)) {
457 			ret = amdgpu_kiq_rreg(adev, reg);
458 			up_read(&adev->reset_domain->sem);
459 		} else {
460 			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
461 		}
462 	} else {
463 		ret = adev->pcie_rreg(adev, reg * 4);
464 	}
465 
466 	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
467 
468 	return ret;
469 }
470 
471 /*
472  * MMIO register byte read helper function
473  * @offset: byte offset from MMIO start
474  *
475 */
476 
477 /**
478  * amdgpu_mm_rreg8 - read a memory mapped IO register
479  *
480  * @adev: amdgpu_device pointer
481  * @offset: byte aligned register offset
482  *
483  * Returns the 8 bit value from the offset specified.
484  */
485 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
486 {
487 	if (amdgpu_device_skip_hw_access(adev))
488 		return 0;
489 
490 	if (offset < adev->rmmio_size)
491 		return (readb(adev->rmmio + offset));
492 	BUG();
493 }
494 
495 /*
496  * MMIO register byte write helper function
497  * @offset: byte offset from MMIO start
498  * @value: the value to be written to the register
499  *
500 */
501 /**
502  * amdgpu_mm_wreg8 - write a memory mapped IO register
503  *
504  * @adev: amdgpu_device pointer
505  * @offset: byte aligned register offset
506  * @value: 8 bit value to write
507  *
508  * Writes the value specified to the offset specified.
509  */
510 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
511 {
512 	if (amdgpu_device_skip_hw_access(adev))
513 		return;
514 
515 	if (offset < adev->rmmio_size)
516 		writeb(value, adev->rmmio + offset);
517 	else
518 		BUG();
519 }
520 
521 /**
522  * amdgpu_device_wreg - write to a memory mapped IO or indirect register
523  *
524  * @adev: amdgpu_device pointer
525  * @reg: dword aligned register offset
526  * @v: 32 bit value to write to the register
527  * @acc_flags: access flags which require special behavior
528  *
529  * Writes the value specified to the offset specified.
530  */
531 void amdgpu_device_wreg(struct amdgpu_device *adev,
532 			uint32_t reg, uint32_t v,
533 			uint32_t acc_flags)
534 {
535 	if (amdgpu_device_skip_hw_access(adev))
536 		return;
537 
538 	if ((reg * 4) < adev->rmmio_size) {
539 		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
540 		    amdgpu_sriov_runtime(adev) &&
541 		    down_read_trylock(&adev->reset_domain->sem)) {
542 			amdgpu_kiq_wreg(adev, reg, v);
543 			up_read(&adev->reset_domain->sem);
544 		} else {
545 			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
546 		}
547 	} else {
548 		adev->pcie_wreg(adev, reg * 4, v);
549 	}
550 
551 	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
552 }
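
/*
 * Illustrative sketch (not part of the driver): a read-modify-write cycle
 * through these helpers, assuming @reg is a valid dword aligned register
 * offset and EXAMPLE_MASK is a placeholder bit mask.
 *
 *	uint32_t v;
 *
 *	v = amdgpu_device_rreg(adev, reg, 0);
 *	v |= EXAMPLE_MASK;
 *	amdgpu_device_wreg(adev, reg, v, 0);
 *
 * Driver code normally goes through the RREG32()/WREG32() macros, which
 * wrap these helpers.
 */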
553 
554 /**
555  * amdgpu_mm_wreg_mmio_rlc -  write register either with direct/indirect mmio or with RLC path if in range
556  *
557  * @adev: amdgpu_device pointer
558  * @reg: mmio/rlc register
559  * @v: value to write
560  *
561  * this function is invoked only for the debugfs register access
562  */
563 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
564 			     uint32_t reg, uint32_t v)
565 {
566 	if (amdgpu_device_skip_hw_access(adev))
567 		return;
568 
569 	if (amdgpu_sriov_fullaccess(adev) &&
570 	    adev->gfx.rlc.funcs &&
571 	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
572 		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
573 			return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
574 	} else if ((reg * 4) >= adev->rmmio_size) {
575 		adev->pcie_wreg(adev, reg * 4, v);
576 	} else {
577 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
578 	}
579 }
580 
581 /**
582  * amdgpu_mm_rdoorbell - read a doorbell dword
583  *
584  * @adev: amdgpu_device pointer
585  * @index: doorbell index
586  *
587  * Returns the value in the doorbell aperture at the
588  * requested doorbell index (CIK).
589  */
590 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
591 {
592 	if (amdgpu_device_skip_hw_access(adev))
593 		return 0;
594 
595 	if (index < adev->doorbell.num_doorbells) {
596 		return readl(adev->doorbell.ptr + index);
597 	} else {
598 		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
599 		return 0;
600 	}
601 }
602 
603 /**
604  * amdgpu_mm_wdoorbell - write a doorbell dword
605  *
606  * @adev: amdgpu_device pointer
607  * @index: doorbell index
608  * @v: value to write
609  *
610  * Writes @v to the doorbell aperture at the
611  * requested doorbell index (CIK).
612  */
613 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
614 {
615 	if (amdgpu_device_skip_hw_access(adev))
616 		return;
617 
618 	if (index < adev->doorbell.num_doorbells) {
619 		writel(v, adev->doorbell.ptr + index);
620 	} else {
621 		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
622 	}
623 }
624 
625 /**
626  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
627  *
628  * @adev: amdgpu_device pointer
629  * @index: doorbell index
630  *
631  * Returns the value in the doorbell aperture at the
632  * requested doorbell index (VEGA10+).
633  */
634 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
635 {
636 	if (amdgpu_device_skip_hw_access(adev))
637 		return 0;
638 
639 	if (index < adev->doorbell.num_doorbells) {
640 		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
641 	} else {
642 		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
643 		return 0;
644 	}
645 }
646 
647 /**
648  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
649  *
650  * @adev: amdgpu_device pointer
651  * @index: doorbell index
652  * @v: value to write
653  *
654  * Writes @v to the doorbell aperture at the
655  * requested doorbell index (VEGA10+).
656  */
657 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
658 {
659 	if (amdgpu_device_skip_hw_access(adev))
660 		return;
661 
662 	if (index < adev->doorbell.num_doorbells) {
663 		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
664 	} else {
665 		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
666 	}
667 }
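
/*
 * Illustrative sketch (not part of the driver): a ring that uses doorbells
 * would typically commit its write pointer like this, assuming @ring is an
 * initialized amdgpu_ring:
 *
 *	if (ring->use_doorbell)
 *		amdgpu_mm_wdoorbell64(adev, ring->doorbell_index, ring->wptr);
 *
 * Ring code usually does this through the WDOORBELL64() macro rather than
 * calling the helper directly.
 */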
668 
669 /**
670  * amdgpu_device_indirect_rreg - read an indirect register
671  *
672  * @adev: amdgpu_device pointer
673  * @pcie_index: mmio register offset
674  * @pcie_data: mmio register offset
675  * @reg_addr: indirect register address to read from
676  *
677  * Returns the value of indirect register @reg_addr
678  */
679 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
680 				u32 pcie_index, u32 pcie_data,
681 				u32 reg_addr)
682 {
683 	unsigned long flags;
684 	u32 r;
685 	void __iomem *pcie_index_offset;
686 	void __iomem *pcie_data_offset;
687 
688 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
689 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
690 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
691 
692 	writel(reg_addr, pcie_index_offset);
693 	readl(pcie_index_offset);
694 	r = readl(pcie_data_offset);
695 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
696 
697 	return r;
698 }
699 
700 /**
701  * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
702  *
703  * @adev: amdgpu_device pointer
704  * @pcie_index: mmio register offset
705  * @pcie_data: mmio register offset
706  * @reg_addr: indirect register address to read from
707  *
708  * Returns the value of indirect register @reg_addr
709  */
710 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
711 				  u32 pcie_index, u32 pcie_data,
712 				  u32 reg_addr)
713 {
714 	unsigned long flags;
715 	u64 r;
716 	void __iomem *pcie_index_offset;
717 	void __iomem *pcie_data_offset;
718 
719 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
720 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
721 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
722 
723 	/* read low 32 bits */
724 	writel(reg_addr, pcie_index_offset);
725 	readl(pcie_index_offset);
726 	r = readl(pcie_data_offset);
727 	/* read high 32 bits */
728 	writel(reg_addr + 4, pcie_index_offset);
729 	readl(pcie_index_offset);
730 	r |= ((u64)readl(pcie_data_offset) << 32);
731 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
732 
733 	return r;
734 }
735 
736 /**
737  * amdgpu_device_indirect_wreg - write an indirect register address
738  *
739  * @adev: amdgpu_device pointer
740  * @pcie_index: mmio register offset
741  * @pcie_data: mmio register offset
742  * @reg_addr: indirect register offset
743  * @reg_data: indirect register data
744  *
745  */
746 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
747 				 u32 pcie_index, u32 pcie_data,
748 				 u32 reg_addr, u32 reg_data)
749 {
750 	unsigned long flags;
751 	void __iomem *pcie_index_offset;
752 	void __iomem *pcie_data_offset;
753 
754 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
755 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
756 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
757 
758 	writel(reg_addr, pcie_index_offset);
759 	readl(pcie_index_offset);
760 	writel(reg_data, pcie_data_offset);
761 	readl(pcie_data_offset);
762 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
763 }
764 
765 /**
766  * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
767  *
768  * @adev: amdgpu_device pointer
769  * @pcie_index: mmio register offset
770  * @pcie_data: mmio register offset
771  * @reg_addr: indirect register offset
772  * @reg_data: indirect register data
773  *
774  */
775 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
776 				   u32 pcie_index, u32 pcie_data,
777 				   u32 reg_addr, u64 reg_data)
778 {
779 	unsigned long flags;
780 	void __iomem *pcie_index_offset;
781 	void __iomem *pcie_data_offset;
782 
783 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
784 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
785 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
786 
787 	/* write low 32 bits */
788 	writel(reg_addr, pcie_index_offset);
789 	readl(pcie_index_offset);
790 	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
791 	readl(pcie_data_offset);
792 	/* write high 32 bits */
793 	writel(reg_addr + 4, pcie_index_offset);
794 	readl(pcie_index_offset);
795 	writel((u32)(reg_data >> 32), pcie_data_offset);
796 	readl(pcie_data_offset);
797 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
798 }
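
/*
 * Illustrative sketch (not part of the driver): an ASIC file would usually
 * wrap these helpers with its own PCIE index/data register pair, roughly
 * like the following (the register names are placeholders for whatever the
 * ASIC headers define):
 *
 *	static u32 example_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 *	{
 *		return amdgpu_device_indirect_rreg(adev, mmEXAMPLE_PCIE_INDEX,
 *						   mmEXAMPLE_PCIE_DATA, reg);
 *	}
 *
 * and install it as adev->pcie_rreg during early init.
 */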
799 
800 /**
801  * amdgpu_invalid_rreg - dummy reg read function
802  *
803  * @adev: amdgpu_device pointer
804  * @reg: offset of register
805  *
806  * Dummy register read function.  Used for register blocks
807  * that certain asics don't have (all asics).
808  * Returns the value in the register.
809  */
810 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
811 {
812 	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
813 	BUG();
814 	return 0;
815 }
816 
817 /**
818  * amdgpu_invalid_wreg - dummy reg write function
819  *
820  * @adev: amdgpu_device pointer
821  * @reg: offset of register
822  * @v: value to write to the register
823  *
824  * Dummy register write function.  Used for register blocks
825  * that certain asics don't have (all asics).
826  */
827 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
828 {
829 	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
830 		  reg, v);
831 	BUG();
832 }
833 
834 /**
835  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
836  *
837  * @adev: amdgpu_device pointer
838  * @reg: offset of register
839  *
840  * Dummy register read function.  Used for register blocks
841  * that certain asics don't have (all asics).
842  * Returns the value in the register.
843  */
844 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
845 {
846 	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
847 	BUG();
848 	return 0;
849 }
850 
851 /**
852  * amdgpu_invalid_wreg64 - dummy reg write function
853  *
854  * @adev: amdgpu_device pointer
855  * @reg: offset of register
856  * @v: value to write to the register
857  *
858  * Dummy register write function.  Used for register blocks
859  * that certain asics don't have (all asics).
860  */
861 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
862 {
863 	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
864 		  reg, v);
865 	BUG();
866 }
867 
868 /**
869  * amdgpu_block_invalid_rreg - dummy reg read function
870  *
871  * @adev: amdgpu_device pointer
872  * @block: offset of instance
873  * @reg: offset of register
874  *
875  * Dummy register read function.  Used for register blocks
876  * that certain asics don't have (all asics).
877  * Returns the value in the register.
878  */
879 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
880 					  uint32_t block, uint32_t reg)
881 {
882 	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
883 		  reg, block);
884 	BUG();
885 	return 0;
886 }
887 
888 /**
889  * amdgpu_block_invalid_wreg - dummy reg write function
890  *
891  * @adev: amdgpu_device pointer
892  * @block: offset of instance
893  * @reg: offset of register
894  * @v: value to write to the register
895  *
896  * Dummy block register write function.  Used for register blocks
897  * that certain asics don't have (all asics).
898  */
899 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
900 				      uint32_t block,
901 				      uint32_t reg, uint32_t v)
902 {
903 	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
904 		  reg, block, v);
905 	BUG();
906 }
907 
908 /**
909  * amdgpu_device_asic_init - Wrapper for atom asic_init
910  *
911  * @adev: amdgpu_device pointer
912  *
913  * Does any asic specific work and then calls atom asic init.
914  */
915 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
916 {
917 	amdgpu_asic_pre_asic_init(adev);
918 
919 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
920 		return amdgpu_atomfirmware_asic_init(adev, true);
921 	else
922 		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
923 }
924 
925 /**
926  * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
927  *
928  * @adev: amdgpu_device pointer
929  *
930  * Allocates a scratch page of VRAM for use by various things in the
931  * driver.
932  */
933 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
934 {
935 	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
936 				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
937 				       &adev->vram_scratch.robj,
938 				       &adev->vram_scratch.gpu_addr,
939 				       (void **)&adev->vram_scratch.ptr);
940 }
941 
942 /**
943  * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
944  *
945  * @adev: amdgpu_device pointer
946  *
947  * Frees the VRAM scratch page.
948  */
949 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
950 {
951 	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
952 }
953 
954 /**
955  * amdgpu_device_program_register_sequence - program an array of registers.
956  *
957  * @adev: amdgpu_device pointer
958  * @registers: pointer to the register array
959  * @array_size: size of the register array
960  *
961  * Programs an array of registers with AND and OR masks.
962  * This is a helper for setting golden registers.
963  */
964 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
965 					     const u32 *registers,
966 					     const u32 array_size)
967 {
968 	u32 tmp, reg, and_mask, or_mask;
969 	int i;
970 
971 	if (array_size % 3)
972 		return;
973 
974 	for (i = 0; i < array_size; i += 3) {
975 		reg = registers[i + 0];
976 		and_mask = registers[i + 1];
977 		or_mask = registers[i + 2];
978 
979 		if (and_mask == 0xffffffff) {
980 			tmp = or_mask;
981 		} else {
982 			tmp = RREG32(reg);
983 			tmp &= ~and_mask;
984 			if (adev->family >= AMDGPU_FAMILY_AI)
985 				tmp |= (or_mask & and_mask);
986 			else
987 				tmp |= or_mask;
988 		}
989 		WREG32(reg, tmp);
990 	}
991 }
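
/*
 * Illustrative sketch (not part of the driver): a golden register list is a
 * flat array of {offset, and_mask, or_mask} triplets (the register names
 * below are placeholders):
 *
 *	static const u32 golden_settings_example[] = {
 *		mmEXAMPLE_REG_A, 0xffffffff, 0x00000001,
 *		mmEXAMPLE_REG_B, 0x0000ff00, 0x00003400,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, golden_settings_example,
 *						ARRAY_SIZE(golden_settings_example));
 *
 * An and_mask of 0xffffffff replaces the register value outright; anything
 * else clears the masked bits and ORs in the new value.
 */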
992 
993 /**
994  * amdgpu_device_pci_config_reset - reset the GPU
995  *
996  * @adev: amdgpu_device pointer
997  *
998  * Resets the GPU using the pci config reset sequence.
999  * Only applicable to asics prior to vega10.
1000  */
1001 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1002 {
1003 	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1004 }
1005 
1006 /**
1007  * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1008  *
1009  * @adev: amdgpu_device pointer
1010  *
1011  * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1012  */
1013 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1014 {
1015 	STUB();
1016 	return -ENOSYS;
1017 #ifdef notyet
1018 	return pci_reset_function(adev->pdev);
1019 #endif
1020 }
1021 
1022 /*
1023  * GPU doorbell aperture helpers function.
1024  */
1025 /**
1026  * amdgpu_device_doorbell_init - Init doorbell driver information.
1027  *
1028  * @adev: amdgpu_device pointer
1029  *
1030  * Init doorbell driver information (CIK)
1031  * Returns 0 on success, error on failure.
1032  */
1033 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
1034 {
1035 
1036 	/* No doorbell on SI hardware generation */
1037 	if (adev->asic_type < CHIP_BONAIRE) {
1038 		adev->doorbell.base = 0;
1039 		adev->doorbell.size = 0;
1040 		adev->doorbell.num_doorbells = 0;
1041 		adev->doorbell.ptr = NULL;
1042 		return 0;
1043 	}
1044 
1045 #ifdef __linux__
1046 	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
1047 		return -EINVAL;
1048 #endif
1049 
1050 	amdgpu_asic_init_doorbell_index(adev);
1051 
1052 	/* doorbell bar mapping */
1053 #ifdef __linux__
1054 	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
1055 	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
1056 #endif
1057 
1058 	if (adev->enable_mes) {
1059 		adev->doorbell.num_doorbells =
1060 			adev->doorbell.size / sizeof(u32);
1061 	} else {
1062 		adev->doorbell.num_doorbells =
1063 			min_t(u32, adev->doorbell.size / sizeof(u32),
1064 			      adev->doorbell_index.max_assignment+1);
1065 		if (adev->doorbell.num_doorbells == 0)
1066 			return -EINVAL;
1067 
1068 		/* For Vega, reserve and map two pages on doorbell BAR since SDMA
1069 		 * paging queue doorbells use the second page. The
1070 		 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
1071 		 * doorbells are in the first page. So with the paging queue enabled,
1072 		 * the max num_doorbells is increased by one page (0x400 dwords).
1073 		 */
1074 		if (adev->asic_type >= CHIP_VEGA10)
1075 			adev->doorbell.num_doorbells += 0x400;
1076 	}
1077 
1078 #ifdef __linux__
1079 	adev->doorbell.ptr = ioremap(adev->doorbell.base,
1080 				     adev->doorbell.num_doorbells *
1081 				     sizeof(u32));
1082 	if (adev->doorbell.ptr == NULL)
1083 		return -ENOMEM;
1084 #endif
1085 
1086 	return 0;
1087 }
1088 
1089 /**
1090  * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1091  *
1092  * @adev: amdgpu_device pointer
1093  *
1094  * Tear down doorbell driver information (CIK)
1095  */
1096 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1097 {
1098 #ifdef __linux__
1099 	iounmap(adev->doorbell.ptr);
1100 #else
1101 	if (adev->doorbell.size > 0)
1102 		bus_space_unmap(adev->doorbell.bst, adev->doorbell.bsh,
1103 		    adev->doorbell.size);
1104 #endif
1105 	adev->doorbell.ptr = NULL;
1106 }
1107 
1108 
1109 
1110 /*
1111  * amdgpu_device_wb_*()
1112  * Writeback is the method by which the GPU updates special pages in memory
1113  * with the status of certain GPU events (fences, ring pointers,etc.).
1114  */
1115 
1116 /**
1117  * amdgpu_device_wb_fini - Disable Writeback and free memory
1118  *
1119  * @adev: amdgpu_device pointer
1120  *
1121  * Disables Writeback and frees the Writeback memory (all asics).
1122  * Used at driver shutdown.
1123  */
1124 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1125 {
1126 	if (adev->wb.wb_obj) {
1127 		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1128 				      &adev->wb.gpu_addr,
1129 				      (void **)&adev->wb.wb);
1130 		adev->wb.wb_obj = NULL;
1131 	}
1132 }
1133 
1134 /**
1135  * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1136  *
1137  * @adev: amdgpu_device pointer
1138  *
1139  * Initializes writeback and allocates writeback memory (all asics).
1140  * Used at driver startup.
1141  * Returns 0 on success or a negative error code on failure.
1142  */
1143 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1144 {
1145 	int r;
1146 
1147 	if (adev->wb.wb_obj == NULL) {
1148 		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1149 		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1150 					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1151 					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
1152 					    (void **)&adev->wb.wb);
1153 		if (r) {
1154 			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1155 			return r;
1156 		}
1157 
1158 		adev->wb.num_wb = AMDGPU_MAX_WB;
1159 		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1160 
1161 		/* clear wb memory */
1162 		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1163 	}
1164 
1165 	return 0;
1166 }
1167 
1168 /**
1169  * amdgpu_device_wb_get - Allocate a wb entry
1170  *
1171  * @adev: amdgpu_device pointer
1172  * @wb: wb index
1173  *
1174  * Allocate a wb slot for use by the driver (all asics).
1175  * Returns 0 on success or -EINVAL on failure.
1176  */
1177 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1178 {
1179 	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1180 
1181 	if (offset < adev->wb.num_wb) {
1182 		__set_bit(offset, adev->wb.used);
1183 		*wb = offset << 3; /* convert to dw offset */
1184 		return 0;
1185 	} else {
1186 		return -EINVAL;
1187 	}
1188 }
1189 
1190 /**
1191  * amdgpu_device_wb_free - Free a wb entry
1192  *
1193  * @adev: amdgpu_device pointer
1194  * @wb: wb index
1195  *
1196  * Free a wb slot allocated for use by the driver (all asics)
1197  */
1198 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1199 {
1200 	wb >>= 3;
1201 	if (wb < adev->wb.num_wb)
1202 		__clear_bit(wb, adev->wb.used);
1203 }
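
/*
 * Illustrative sketch (not part of the driver): an IP block grabs a
 * writeback slot, derives the CPU and GPU views of it, and frees it again
 * on teardown.
 *
 *	volatile uint32_t *cpu_ptr;
 *	u64 gpu_addr;
 *	u32 wb;
 *	int r;
 *
 *	r = amdgpu_device_wb_get(adev, &wb);
 *	if (r)
 *		return r;
 *	cpu_ptr = &adev->wb.wb[wb];               // CPU view (dword index)
 *	gpu_addr = adev->wb.gpu_addr + (wb * 4);  // matching GPU address
 *	...
 *	amdgpu_device_wb_free(adev, wb);
 */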
1204 
1205 /**
1206  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1207  *
1208  * @adev: amdgpu_device pointer
1209  *
1210  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1211  * to fail, but if any of the BARs is not accessible after the resize we abort
1212  * driver loading by returning -ENODEV.
1213  */
1214 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1215 {
1216 #ifdef __linux__
1217 	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1218 	struct pci_bus *root;
1219 	struct resource *res;
1220 	unsigned i;
1221 	u16 cmd;
1222 	int r;
1223 
1224 	/* Bypass for VF */
1225 	if (amdgpu_sriov_vf(adev))
1226 		return 0;
1227 
1228 	/* skip if the bios has already enabled large BAR */
1229 	if (adev->gmc.real_vram_size &&
1230 	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1231 		return 0;
1232 
1233 	/* Check if the root BUS has 64bit memory resources */
1234 	root = adev->pdev->bus;
1235 	while (root->parent)
1236 		root = root->parent;
1237 
1238 	pci_bus_for_each_resource(root, res, i) {
1239 		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1240 		    res->start > 0x100000000ull)
1241 			break;
1242 	}
1243 
1244 	/* Trying to resize is pointless without a root hub window above 4GB */
1245 	if (!res)
1246 		return 0;
1247 
1248 	/* Limit the BAR size to what is available */
1249 	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1250 			rbar_size);
1251 
1252 	/* Disable memory decoding while we change the BAR addresses and size */
1253 	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1254 	pci_write_config_word(adev->pdev, PCI_COMMAND,
1255 			      cmd & ~PCI_COMMAND_MEMORY);
1256 
1257 	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
1258 	amdgpu_device_doorbell_fini(adev);
1259 	if (adev->asic_type >= CHIP_BONAIRE)
1260 		pci_release_resource(adev->pdev, 2);
1261 
1262 	pci_release_resource(adev->pdev, 0);
1263 
1264 	r = pci_resize_resource(adev->pdev, 0, rbar_size);
1265 	if (r == -ENOSPC)
1266 		DRM_INFO("Not enough PCI address space for a large BAR.");
1267 	else if (r && r != -ENOTSUPP)
1268 		DRM_ERROR("Problem resizing BAR0 (%d).", r);
1269 
1270 	pci_assign_unassigned_bus_resources(adev->pdev->bus);
1271 
1272 	/* When the doorbell or fb BAR isn't available we have no chance of
1273 	 * using the device.
1274 	 */
1275 	r = amdgpu_device_doorbell_init(adev);
1276 	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1277 		return -ENODEV;
1278 
1279 	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1280 #endif /* __linux__ */
1281 
1282 	return 0;
1283 }
1284 
1285 /*
1286  * GPU helpers function.
1287  */
1288 /**
1289  * amdgpu_device_need_post - check if the hw need post or not
1290  *
1291  * @adev: amdgpu_device pointer
1292  *
1293  * Check if the asic has been initialized (all asics) at driver startup
1294  * or if post is needed because a hw reset was performed.
1295  * Returns true if post is needed or false if not.
1296  */
1297 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1298 {
1299 	uint32_t reg;
1300 
1301 	if (amdgpu_sriov_vf(adev))
1302 		return false;
1303 
1304 	if (amdgpu_passthrough(adev)) {
1305 		/* for FIJI: in the whole-GPU pass-through virtualization case, after a VM
1306 		 * reboot some old SMC firmware still needs the driver to do a vPost,
1307 		 * otherwise the GPU hangs. SMC firmware versions above 22.15 don't have
1308 		 * this flaw, so force vPost for SMC versions below 22.15.
1309 		 */
1310 		if (adev->asic_type == CHIP_FIJI) {
1311 			int err;
1312 			uint32_t fw_ver;
1313 			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1314 			/* force vPost if error occured */
1315 			/* force vPost if an error occurred */
1316 				return true;
1317 
1318 			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1319 			if (fw_ver < 0x00160e00)
1320 				return true;
1321 		}
1322 	}
1323 
1324 	/* Don't post if we need to reset whole hive on init */
1325 	if (adev->gmc.xgmi.pending_reset)
1326 		return false;
1327 
1328 	if (adev->has_hw_reset) {
1329 		adev->has_hw_reset = false;
1330 		return true;
1331 	}
1332 
1333 	/* bios scratch used on CIK+ */
1334 	if (adev->asic_type >= CHIP_BONAIRE)
1335 		return amdgpu_atombios_scratch_need_asic_init(adev);
1336 
1337 	/* check MEM_SIZE for older asics */
1338 	reg = amdgpu_asic_get_config_memsize(adev);
1339 
1340 	if ((reg != 0) && (reg != 0xffffffff))
1341 		return false;
1342 
1343 	return true;
1344 }
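
/*
 * Illustrative sketch (not part of the driver): during device init the
 * result of this check gates whether the ATOM asic_init table is executed,
 * roughly:
 *
 *	if (amdgpu_device_need_post(adev)) {
 *		int r = amdgpu_device_asic_init(adev);
 *
 *		if (r)
 *			dev_err(adev->dev, "gpu post error!\n");
 *	}
 */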
1345 
1346 /**
1347  * amdgpu_device_should_use_aspm - check if the device should program ASPM
1348  *
1349  * @adev: amdgpu_device pointer
1350  *
1351  * Confirm whether the module parameter and pcie bridge agree that ASPM should
1352  * be set for this device.
1353  *
1354  * Returns true if it should be used or false if not.
1355  */
1356 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1357 {
1358 	switch (amdgpu_aspm) {
1359 	case -1:
1360 		break;
1361 	case 0:
1362 		return false;
1363 	case 1:
1364 		return true;
1365 	default:
1366 		return false;
1367 	}
1368 	return pcie_aspm_enabled(adev->pdev);
1369 }
1370 
1371 /* if we get transitioned to only one device, take VGA back */
1372 /**
1373  * amdgpu_device_vga_set_decode - enable/disable vga decode
1374  *
1375  * @pdev: PCI device pointer
1376  * @state: enable/disable vga decode
1377  *
1378  * Enable/disable vga decode (all asics).
1379  * Returns VGA resource flags.
1380  */
1381 #ifdef notyet
1382 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1383 		bool state)
1384 {
1385 	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1386 	amdgpu_asic_set_vga_state(adev, state);
1387 	if (state)
1388 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1389 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1390 	else
1391 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1392 }
1393 #endif
1394 
1395 /**
1396  * amdgpu_device_check_block_size - validate the vm block size
1397  *
1398  * @adev: amdgpu_device pointer
1399  *
1400  * Validates the vm block size specified via module parameter.
1401  * The vm block size defines the split of bits between the page table and the
1402  * page directory: a page is 4KB, so we have a 12 bit offset, a minimum of 9
1403  * bits in the page table, and the remaining bits in the page directory.
1404  */
1405 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1406 {
1407 	/* defines number of bits in page table versus page directory,
1408 	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1409 	 * page table and the remaining bits are in the page directory */
1410 	if (amdgpu_vm_block_size == -1)
1411 		return;
1412 
1413 	if (amdgpu_vm_block_size < 9) {
1414 		dev_warn(adev->dev, "VM page table size (%d) too small\n",
1415 			 amdgpu_vm_block_size);
1416 		amdgpu_vm_block_size = -1;
1417 	}
1418 }
1419 
1420 /**
1421  * amdgpu_device_check_vm_size - validate the vm size
1422  *
1423  * @adev: amdgpu_device pointer
1424  *
1425  * Validates the vm size in GB specified via module parameter.
1426  * The VM size is the size of the GPU virtual memory space in GB.
1427  */
1428 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1429 {
1430 	/* no need to check the default value */
1431 	if (amdgpu_vm_size == -1)
1432 		return;
1433 
1434 	if (amdgpu_vm_size < 1) {
1435 		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1436 			 amdgpu_vm_size);
1437 		amdgpu_vm_size = -1;
1438 	}
1439 }
1440 
1441 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1442 {
1443 #ifdef __linux__
1444 	struct sysinfo si;
1445 #endif
1446 	bool is_os_64 = (sizeof(void *) == 8);
1447 	uint64_t total_memory;
1448 	uint64_t dram_size_seven_GB = 0x1B8000000;
1449 	uint64_t dram_size_three_GB = 0xB8000000;
1450 
1451 	if (amdgpu_smu_memory_pool_size == 0)
1452 		return;
1453 
1454 	if (!is_os_64) {
1455 		DRM_WARN("Not 64-bit OS, feature not supported\n");
1456 		goto def_value;
1457 	}
1458 #ifdef __linux__
1459 	si_meminfo(&si);
1460 	total_memory = (uint64_t)si.totalram * si.mem_unit;
1461 #else
1462 	total_memory = ptoa(physmem);
1463 #endif
1464 
1465 	if ((amdgpu_smu_memory_pool_size == 1) ||
1466 		(amdgpu_smu_memory_pool_size == 2)) {
1467 		if (total_memory < dram_size_three_GB)
1468 			goto def_value1;
1469 	} else if ((amdgpu_smu_memory_pool_size == 4) ||
1470 		(amdgpu_smu_memory_pool_size == 8)) {
1471 		if (total_memory < dram_size_seven_GB)
1472 			goto def_value1;
1473 	} else {
1474 		DRM_WARN("Smu memory pool size not supported\n");
1475 		goto def_value;
1476 	}
1477 	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1478 
1479 	return;
1480 
1481 def_value1:
1482 	DRM_WARN("Not enough system memory\n");
1483 def_value:
1484 	adev->pm.smu_prv_buffer_size = 0;
1485 }
1486 
1487 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1488 {
1489 	if (!(adev->flags & AMD_IS_APU) ||
1490 	    adev->asic_type < CHIP_RAVEN)
1491 		return 0;
1492 
1493 	switch (adev->asic_type) {
1494 	case CHIP_RAVEN:
1495 		if (adev->pdev->device == 0x15dd)
1496 			adev->apu_flags |= AMD_APU_IS_RAVEN;
1497 		if (adev->pdev->device == 0x15d8)
1498 			adev->apu_flags |= AMD_APU_IS_PICASSO;
1499 		break;
1500 	case CHIP_RENOIR:
1501 		if ((adev->pdev->device == 0x1636) ||
1502 		    (adev->pdev->device == 0x164c))
1503 			adev->apu_flags |= AMD_APU_IS_RENOIR;
1504 		else
1505 			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1506 		break;
1507 	case CHIP_VANGOGH:
1508 		adev->apu_flags |= AMD_APU_IS_VANGOGH;
1509 		break;
1510 	case CHIP_YELLOW_CARP:
1511 		break;
1512 	case CHIP_CYAN_SKILLFISH:
1513 		if ((adev->pdev->device == 0x13FE) ||
1514 		    (adev->pdev->device == 0x143F))
1515 			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1516 		break;
1517 	default:
1518 		break;
1519 	}
1520 
1521 	return 0;
1522 }
1523 
1524 /**
1525  * amdgpu_device_check_arguments - validate module params
1526  *
1527  * @adev: amdgpu_device pointer
1528  *
1529  * Validates certain module parameters and updates
1530  * the associated values used by the driver (all asics).
1531  */
1532 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1533 {
1534 	if (amdgpu_sched_jobs < 4) {
1535 		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1536 			 amdgpu_sched_jobs);
1537 		amdgpu_sched_jobs = 4;
1538 	} else if (!is_power_of_2(amdgpu_sched_jobs)){
1539 		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1540 			 amdgpu_sched_jobs);
1541 		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1542 	}
1543 
1544 	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1545 		/* gart size must be greater or equal to 32M */
1546 		dev_warn(adev->dev, "gart size (%d) too small\n",
1547 			 amdgpu_gart_size);
1548 		amdgpu_gart_size = -1;
1549 	}
1550 
1551 	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1552 		/* gtt size must be greater or equal to 32M */
1553 		dev_warn(adev->dev, "gtt size (%d) too small\n",
1554 				 amdgpu_gtt_size);
1555 		amdgpu_gtt_size = -1;
1556 	}
1557 
1558 	/* valid range is between 4 and 9 inclusive */
1559 	if (amdgpu_vm_fragment_size != -1 &&
1560 	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1561 		dev_warn(adev->dev, "valid range is between 4 and 9\n");
1562 		amdgpu_vm_fragment_size = -1;
1563 	}
1564 
1565 	if (amdgpu_sched_hw_submission < 2) {
1566 		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1567 			 amdgpu_sched_hw_submission);
1568 		amdgpu_sched_hw_submission = 2;
1569 	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1570 		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1571 			 amdgpu_sched_hw_submission);
1572 		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1573 	}
1574 
1575 	if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1576 		dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1577 		amdgpu_reset_method = -1;
1578 	}
1579 
1580 	amdgpu_device_check_smu_prv_buffer_size(adev);
1581 
1582 	amdgpu_device_check_vm_size(adev);
1583 
1584 	amdgpu_device_check_block_size(adev);
1585 
1586 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1587 
1588 	return 0;
1589 }
1590 
1591 #ifdef __linux__
1592 /**
1593  * amdgpu_switcheroo_set_state - set switcheroo state
1594  *
1595  * @pdev: pci dev pointer
1596  * @state: vga_switcheroo state
1597  *
1598  * Callback for the switcheroo driver.  Suspends or resumes the
1599  * asics before or after it is powered up using ACPI methods.
1600  */
1601 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1602 					enum vga_switcheroo_state state)
1603 {
1604 	struct drm_device *dev = pci_get_drvdata(pdev);
1605 	int r;
1606 
1607 	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1608 		return;
1609 
1610 	if (state == VGA_SWITCHEROO_ON) {
1611 		pr_info("switched on\n");
1612 		/* don't suspend or resume card normally */
1613 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1614 
1615 		pci_set_power_state(pdev, PCI_D0);
1616 		amdgpu_device_load_pci_state(pdev);
1617 		r = pci_enable_device(pdev);
1618 		if (r)
1619 			DRM_WARN("pci_enable_device failed (%d)\n", r);
1620 		amdgpu_device_resume(dev, true);
1621 
1622 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1623 	} else {
1624 		pr_info("switched off\n");
1625 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1626 		amdgpu_device_suspend(dev, true);
1627 		amdgpu_device_cache_pci_state(pdev);
1628 		/* Shut down the device */
1629 		pci_disable_device(pdev);
1630 		pci_set_power_state(pdev, PCI_D3cold);
1631 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1632 	}
1633 }
1634 
1635 /**
1636  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1637  *
1638  * @pdev: pci dev pointer
1639  *
1640  * Callback for the switcheroo driver.  Check of the switcheroo
1641  * state can be changed.
1642  * Returns true if the state can be changed, false if not.
1643  */
1644 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1645 {
1646 	struct drm_device *dev = pci_get_drvdata(pdev);
1647 
1648 	/*
1649 	* FIXME: open_count is protected by drm_global_mutex but that would lead to
1650 	* locking inversion with the driver load path. And the access here is
1651 	* completely racy anyway. So don't bother with locking for now.
1652 	*/
1653 	return atomic_read(&dev->open_count) == 0;
1654 }
1655 #endif /* __linux__ */
1656 
1657 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1658 #ifdef notyet
1659 	.set_gpu_state = amdgpu_switcheroo_set_state,
1660 	.reprobe = NULL,
1661 	.can_switch = amdgpu_switcheroo_can_switch,
1662 #endif
1663 };
1664 
1665 /**
1666  * amdgpu_device_ip_set_clockgating_state - set the CG state
1667  *
1668  * @dev: amdgpu_device pointer
1669  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1670  * @state: clockgating state (gate or ungate)
1671  *
1672  * Sets the requested clockgating state for all instances of
1673  * the hardware IP specified.
1674  * Returns the error code from the last instance.
1675  */
1676 int amdgpu_device_ip_set_clockgating_state(void *dev,
1677 					   enum amd_ip_block_type block_type,
1678 					   enum amd_clockgating_state state)
1679 {
1680 	struct amdgpu_device *adev = dev;
1681 	int i, r = 0;
1682 
1683 	for (i = 0; i < adev->num_ip_blocks; i++) {
1684 		if (!adev->ip_blocks[i].status.valid)
1685 			continue;
1686 		if (adev->ip_blocks[i].version->type != block_type)
1687 			continue;
1688 		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1689 			continue;
1690 		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1691 			(void *)adev, state);
1692 		if (r)
1693 			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1694 				  adev->ip_blocks[i].version->funcs->name, r);
1695 	}
1696 	return r;
1697 }
1698 
1699 /**
1700  * amdgpu_device_ip_set_powergating_state - set the PG state
1701  *
1702  * @dev: amdgpu_device pointer
1703  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1704  * @state: powergating state (gate or ungate)
1705  *
1706  * Sets the requested powergating state for all instances of
1707  * the hardware IP specified.
1708  * Returns the error code from the last instance.
1709  */
1710 int amdgpu_device_ip_set_powergating_state(void *dev,
1711 					   enum amd_ip_block_type block_type,
1712 					   enum amd_powergating_state state)
1713 {
1714 	struct amdgpu_device *adev = dev;
1715 	int i, r = 0;
1716 
1717 	for (i = 0; i < adev->num_ip_blocks; i++) {
1718 		if (!adev->ip_blocks[i].status.valid)
1719 			continue;
1720 		if (adev->ip_blocks[i].version->type != block_type)
1721 			continue;
1722 		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1723 			continue;
1724 		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1725 			(void *)adev, state);
1726 		if (r)
1727 			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1728 				  adev->ip_blocks[i].version->funcs->name, r);
1729 	}
1730 	return r;
1731 }
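
/*
 * Illustrative sketch (not part of the driver): power management code can
 * gate a single IP type, e.g. the GFX block, through these helpers:
 *
 *	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       AMD_CG_STATE_GATE);
 *	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       AMD_PG_STATE_GATE);
 */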
1732 
1733 /**
1734  * amdgpu_device_ip_get_clockgating_state - get the CG state
1735  *
1736  * @adev: amdgpu_device pointer
1737  * @flags: clockgating feature flags
1738  *
1739  * Walks the list of IPs on the device and updates the clockgating
1740  * flags for each IP.
1741  * Updates @flags with the feature flags for each hardware IP where
1742  * clockgating is enabled.
1743  */
1744 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1745 					    u64 *flags)
1746 {
1747 	int i;
1748 
1749 	for (i = 0; i < adev->num_ip_blocks; i++) {
1750 		if (!adev->ip_blocks[i].status.valid)
1751 			continue;
1752 		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1753 			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1754 	}
1755 }
1756 
1757 /**
1758  * amdgpu_device_ip_wait_for_idle - wait for idle
1759  *
1760  * @adev: amdgpu_device pointer
1761  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1762  *
1763  * Waits for the requested hardware IP to be idle.
1764  * Returns 0 for success or a negative error code on failure.
1765  */
1766 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1767 				   enum amd_ip_block_type block_type)
1768 {
1769 	int i, r;
1770 
1771 	for (i = 0; i < adev->num_ip_blocks; i++) {
1772 		if (!adev->ip_blocks[i].status.valid)
1773 			continue;
1774 		if (adev->ip_blocks[i].version->type == block_type) {
1775 			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1776 			if (r)
1777 				return r;
1778 			break;
1779 		}
1780 	}
1781 	return 0;
1782 
1783 }
1784 
1785 /**
1786  * amdgpu_device_ip_is_idle - is the hardware IP idle
1787  *
1788  * @adev: amdgpu_device pointer
1789  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1790  *
1791  * Check if the hardware IP is idle or not.
1792  * Returns true if the IP is idle, false if not.
1793  */
1794 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1795 			      enum amd_ip_block_type block_type)
1796 {
1797 	int i;
1798 
1799 	for (i = 0; i < adev->num_ip_blocks; i++) {
1800 		if (!adev->ip_blocks[i].status.valid)
1801 			continue;
1802 		if (adev->ip_blocks[i].version->type == block_type)
1803 			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1804 	}
1805 	return true;
1806 
1807 }
1808 
1809 /**
1810  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1811  *
1812  * @adev: amdgpu_device pointer
1813  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1814  *
1815  * Returns a pointer to the hardware IP block structure
1816  * if it exists for the asic, otherwise NULL.
1817  */
1818 struct amdgpu_ip_block *
1819 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1820 			      enum amd_ip_block_type type)
1821 {
1822 	int i;
1823 
1824 	for (i = 0; i < adev->num_ip_blocks; i++)
1825 		if (adev->ip_blocks[i].version->type == type)
1826 			return &adev->ip_blocks[i];
1827 
1828 	return NULL;
1829 }
1830 
1831 /**
1832  * amdgpu_device_ip_block_version_cmp
1833  *
1834  * @adev: amdgpu_device pointer
1835  * @type: enum amd_ip_block_type
1836  * @major: major version
1837  * @minor: minor version
1838  *
1839  * Returns 0 if the installed IP block version is equal to or greater than the
1840  * given version, or 1 if it is smaller or the ip_block doesn't exist.
1841  */
1842 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1843 				       enum amd_ip_block_type type,
1844 				       u32 major, u32 minor)
1845 {
1846 	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1847 
1848 	if (ip_block && ((ip_block->version->major > major) ||
1849 			((ip_block->version->major == major) &&
1850 			(ip_block->version->minor >= minor))))
1851 		return 0;
1852 
1853 	return 1;
1854 }
1855 
1856 /**
1857  * amdgpu_device_ip_block_add
1858  *
1859  * @adev: amdgpu_device pointer
1860  * @ip_block_version: pointer to the IP to add
1861  *
1862  * Adds the IP block driver information to the collection of IPs
1863  * on the asic.
1864  */
1865 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1866 			       const struct amdgpu_ip_block_version *ip_block_version)
1867 {
1868 	if (!ip_block_version)
1869 		return -EINVAL;
1870 
1871 	switch (ip_block_version->type) {
1872 	case AMD_IP_BLOCK_TYPE_VCN:
1873 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1874 			return 0;
1875 		break;
1876 	case AMD_IP_BLOCK_TYPE_JPEG:
1877 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1878 			return 0;
1879 		break;
1880 	default:
1881 		break;
1882 	}
1883 
1884 	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1885 		  ip_block_version->funcs->name);
1886 
1887 	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1888 
1889 	return 0;
1890 }
1891 
1892 /**
1893  * amdgpu_device_enable_virtual_display - enable virtual display feature
1894  *
1895  * @adev: amdgpu_device pointer
1896  *
1897  * Enables the virtual display feature if the user has enabled it via
1898  * the module parameter virtual_display.  This feature provides virtual
1899  * display hardware on headless boards or in virtualized environments.
1900  * This function parses and validates the configuration string specified by
1901  * the user and configures the virtual display settings (number of
1902  * virtual connectors, crtcs, etc.) accordingly.
1903  */
1904 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1905 {
1906 	adev->enable_virtual_display = false;
1907 
1908 #ifdef notyet
1909 	if (amdgpu_virtual_display) {
1910 		const char *pci_address_name = pci_name(adev->pdev);
1911 		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1912 
1913 		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1914 		pciaddstr_tmp = pciaddstr;
1915 		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1916 			pciaddname = strsep(&pciaddname_tmp, ",");
1917 			if (!strcmp("all", pciaddname)
1918 			    || !strcmp(pci_address_name, pciaddname)) {
1919 				long num_crtc;
1920 				int res = -1;
1921 
1922 				adev->enable_virtual_display = true;
1923 
1924 				if (pciaddname_tmp)
1925 					res = kstrtol(pciaddname_tmp, 10,
1926 						      &num_crtc);
1927 
1928 				if (!res) {
1929 					if (num_crtc < 1)
1930 						num_crtc = 1;
1931 					if (num_crtc > 6)
1932 						num_crtc = 6;
1933 					adev->mode_info.num_crtc = num_crtc;
1934 				} else {
1935 					adev->mode_info.num_crtc = 1;
1936 				}
1937 				break;
1938 			}
1939 		}
1940 
1941 		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1942 			 amdgpu_virtual_display, pci_address_name,
1943 			 adev->enable_virtual_display, adev->mode_info.num_crtc);
1944 
1945 		kfree(pciaddstr);
1946 	}
1947 #endif
1948 }
1949 
1950 /**
1951  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1952  *
1953  * @adev: amdgpu_device pointer
1954  *
1955  * Parses the asic configuration parameters specified in the gpu info
1956  * firmware and makes them available to the driver for use in configuring
1957  * the asic.
1958  * Returns 0 on success, -EINVAL on failure.
1959  */
1960 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1961 {
1962 	const char *chip_name;
1963 	char fw_name[40];
1964 	int err;
1965 	const struct gpu_info_firmware_header_v1_0 *hdr;
1966 
1967 	adev->firmware.gpu_info_fw = NULL;
1968 
1969 	if (adev->mman.discovery_bin) {
1970 		/*
1971 		 * FIXME: The bounding box is still needed by Navi12, so
1972 		 * temporarily read it from gpu_info firmware. Should be dropped
1973 		 * when DAL no longer needs it.
1974 		 */
1975 		if (adev->asic_type != CHIP_NAVI12)
1976 			return 0;
1977 	}
1978 
1979 	switch (adev->asic_type) {
1980 	default:
1981 		return 0;
1982 	case CHIP_VEGA10:
1983 		chip_name = "vega10";
1984 		break;
1985 	case CHIP_VEGA12:
1986 		chip_name = "vega12";
1987 		break;
1988 	case CHIP_RAVEN:
1989 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1990 			chip_name = "raven2";
1991 		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1992 			chip_name = "picasso";
1993 		else
1994 			chip_name = "raven";
1995 		break;
1996 	case CHIP_ARCTURUS:
1997 		chip_name = "arcturus";
1998 		break;
1999 	case CHIP_NAVI12:
2000 		chip_name = "navi12";
2001 		break;
2002 	}
2003 
2004 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
2005 	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
2006 	if (err) {
2007 		dev_err(adev->dev,
2008 			"Failed to load gpu_info firmware \"%s\"\n",
2009 			fw_name);
2010 		goto out;
2011 	}
2012 	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
2013 	if (err) {
2014 		dev_err(adev->dev,
2015 			"Failed to validate gpu_info firmware \"%s\"\n",
2016 			fw_name);
2017 		goto out;
2018 	}
2019 
2020 	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
2021 	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2022 
2023 	switch (hdr->version_major) {
2024 	case 1:
2025 	{
2026 		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2027 			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2028 								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2029 
2030 		/*
2031 		 * Should be dropped when DAL no longer needs it.
2032 		 */
2033 		if (adev->asic_type == CHIP_NAVI12)
2034 			goto parse_soc_bounding_box;
2035 
2036 		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2037 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2038 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2039 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2040 		adev->gfx.config.max_texture_channel_caches =
2041 			le32_to_cpu(gpu_info_fw->gc_num_tccs);
2042 		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2043 		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2044 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2045 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2046 		adev->gfx.config.double_offchip_lds_buf =
2047 			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2048 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2049 		adev->gfx.cu_info.max_waves_per_simd =
2050 			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2051 		adev->gfx.cu_info.max_scratch_slots_per_cu =
2052 			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2053 		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2054 		if (hdr->version_minor >= 1) {
2055 			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2056 				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2057 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2058 			adev->gfx.config.num_sc_per_sh =
2059 				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2060 			adev->gfx.config.num_packer_per_sc =
2061 				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2062 		}
2063 
2064 parse_soc_bounding_box:
2065 		/*
2066 		 * soc bounding box info is not integrated in the discovery table,
2067 		 * so we still need to parse it from the gpu info firmware when needed.
2068 		 */
2069 		if (hdr->version_minor == 2) {
2070 			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2071 				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2072 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2073 			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2074 		}
2075 		break;
2076 	}
2077 	default:
2078 		dev_err(adev->dev,
2079 			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2080 		err = -EINVAL;
2081 		goto out;
2082 	}
2083 out:
2084 	return err;
2085 }
2086 
2087 /**
2088  * amdgpu_device_ip_early_init - run early init for hardware IPs
2089  *
2090  * @adev: amdgpu_device pointer
2091  *
2092  * Early initialization pass for hardware IPs.  The hardware IPs that make
2093  * up each asic are discovered and each IP's early_init callback is run.  This
2094  * is the first stage in initializing the asic.
2095  * Returns 0 on success, negative error code on failure.
2096  */
2097 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2098 {
2099 	struct drm_device *dev = adev_to_drm(adev);
2100 	struct pci_dev *parent;
2101 	int i, r;
2102 
2103 	amdgpu_device_enable_virtual_display(adev);
2104 
2105 	if (amdgpu_sriov_vf(adev)) {
2106 		r = amdgpu_virt_request_full_gpu(adev, true);
2107 		if (r)
2108 			return r;
2109 	}
2110 
2111 	switch (adev->asic_type) {
2112 #ifdef CONFIG_DRM_AMDGPU_SI
2113 	case CHIP_VERDE:
2114 	case CHIP_TAHITI:
2115 	case CHIP_PITCAIRN:
2116 	case CHIP_OLAND:
2117 	case CHIP_HAINAN:
2118 		adev->family = AMDGPU_FAMILY_SI;
2119 		r = si_set_ip_blocks(adev);
2120 		if (r)
2121 			return r;
2122 		break;
2123 #endif
2124 #ifdef CONFIG_DRM_AMDGPU_CIK
2125 	case CHIP_BONAIRE:
2126 	case CHIP_HAWAII:
2127 	case CHIP_KAVERI:
2128 	case CHIP_KABINI:
2129 	case CHIP_MULLINS:
2130 		if (adev->flags & AMD_IS_APU)
2131 			adev->family = AMDGPU_FAMILY_KV;
2132 		else
2133 			adev->family = AMDGPU_FAMILY_CI;
2134 
2135 		r = cik_set_ip_blocks(adev);
2136 		if (r)
2137 			return r;
2138 		break;
2139 #endif
2140 	case CHIP_TOPAZ:
2141 	case CHIP_TONGA:
2142 	case CHIP_FIJI:
2143 	case CHIP_POLARIS10:
2144 	case CHIP_POLARIS11:
2145 	case CHIP_POLARIS12:
2146 	case CHIP_VEGAM:
2147 	case CHIP_CARRIZO:
2148 	case CHIP_STONEY:
2149 		if (adev->flags & AMD_IS_APU)
2150 			adev->family = AMDGPU_FAMILY_CZ;
2151 		else
2152 			adev->family = AMDGPU_FAMILY_VI;
2153 
2154 		r = vi_set_ip_blocks(adev);
2155 		if (r)
2156 			return r;
2157 		break;
2158 	default:
2159 		r = amdgpu_discovery_set_ip_blocks(adev);
2160 		if (r)
2161 			return r;
2162 		break;
2163 	}
2164 
2165 	if (amdgpu_has_atpx() &&
2166 	    (amdgpu_is_atpx_hybrid() ||
2167 	     amdgpu_has_atpx_dgpu_power_cntl()) &&
2168 	    ((adev->flags & AMD_IS_APU) == 0) &&
2169 	    !pci_is_thunderbolt_attached(dev->pdev))
2170 		adev->flags |= AMD_IS_PX;
2171 
2172 	if (!(adev->flags & AMD_IS_APU)) {
2173 		parent = pci_upstream_bridge(adev->pdev);
2174 		adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2175 	}
2176 
2177 	amdgpu_amdkfd_device_probe(adev);
2178 
2179 	adev->pm.pp_feature = amdgpu_pp_feature_mask;
2180 	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2181 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2182 	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2183 		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2184 
2185 	for (i = 0; i < adev->num_ip_blocks; i++) {
2186 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2187 			DRM_ERROR("disabled ip block: %d <%s>\n",
2188 				  i, adev->ip_blocks[i].version->funcs->name);
2189 			adev->ip_blocks[i].status.valid = false;
2190 		} else {
2191 			if (adev->ip_blocks[i].version->funcs->early_init) {
2192 				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2193 				if (r == -ENOENT) {
2194 					adev->ip_blocks[i].status.valid = false;
2195 				} else if (r) {
2196 					DRM_ERROR("early_init of IP block <%s> failed %d\n",
2197 						  adev->ip_blocks[i].version->funcs->name, r);
2198 					return r;
2199 				} else {
2200 					adev->ip_blocks[i].status.valid = true;
2201 				}
2202 			} else {
2203 				adev->ip_blocks[i].status.valid = true;
2204 			}
2205 		}
2206 		/* get the vbios after the asic_funcs are set up */
2207 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2208 			r = amdgpu_device_parse_gpu_info_fw(adev);
2209 			if (r)
2210 				return r;
2211 
2212 			/* Read BIOS */
2213 			if (!amdgpu_get_bios(adev))
2214 				return -EINVAL;
2215 
2216 			r = amdgpu_atombios_init(adev);
2217 			if (r) {
2218 				dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2219 				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2220 				return r;
2221 			}
2222 
2223 			/* get pf2vf msg info at the earliest time */
2224 			if (amdgpu_sriov_vf(adev))
2225 				amdgpu_virt_init_data_exchange(adev);
2226 
2227 		}
2228 	}
2229 
2230 	adev->cg_flags &= amdgpu_cg_mask;
2231 	adev->pg_flags &= amdgpu_pg_mask;
2232 
2233 	return 0;
2234 }
2235 
2236 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2237 {
2238 	int i, r;
2239 
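	/*
	 * Phase 1 brings up only COMMON, IH and, under SR-IOV, PSP; the
	 * remaining blocks are initialized in amdgpu_device_ip_hw_init_phase2()
	 * once firmware has been loaded.
	 */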
2240 	for (i = 0; i < adev->num_ip_blocks; i++) {
2241 		if (!adev->ip_blocks[i].status.sw)
2242 			continue;
2243 		if (adev->ip_blocks[i].status.hw)
2244 			continue;
2245 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2246 		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2247 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2248 			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2249 			if (r) {
2250 				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2251 					  adev->ip_blocks[i].version->funcs->name, r);
2252 				return r;
2253 			}
2254 			adev->ip_blocks[i].status.hw = true;
2255 		}
2256 	}
2257 
2258 	return 0;
2259 }
2260 
2261 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2262 {
2263 	int i, r;
2264 
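	/* initialize every block that was skipped in phase 1 */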
2265 	for (i = 0; i < adev->num_ip_blocks; i++) {
2266 		if (!adev->ip_blocks[i].status.sw)
2267 			continue;
2268 		if (adev->ip_blocks[i].status.hw)
2269 			continue;
2270 		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2271 		if (r) {
2272 			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2273 				  adev->ip_blocks[i].version->funcs->name, r);
2274 			return r;
2275 		}
2276 		adev->ip_blocks[i].status.hw = true;
2277 	}
2278 
2279 	return 0;
2280 }
2281 
2282 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2283 {
2284 	int r = 0;
2285 	int i;
2286 	uint32_t smu_version;
2287 
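	/*
	 * On VEGA10 and newer the PSP block handles firmware loading, so it is
	 * brought up here (or resumed on the reset/suspend path) before the
	 * remaining blocks are initialized in hw_init phase 2.
	 */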
2288 	if (adev->asic_type >= CHIP_VEGA10) {
2289 		for (i = 0; i < adev->num_ip_blocks; i++) {
2290 			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2291 				continue;
2292 
2293 			if (!adev->ip_blocks[i].status.sw)
2294 				continue;
2295 
2296 			/* no need to do the fw loading again if already done */
2297 			if (adev->ip_blocks[i].status.hw)
2298 				break;
2299 
2300 			if (amdgpu_in_reset(adev) || adev->in_suspend) {
2301 				r = adev->ip_blocks[i].version->funcs->resume(adev);
2302 				if (r) {
2303 					DRM_ERROR("resume of IP block <%s> failed %d\n",
2304 							  adev->ip_blocks[i].version->funcs->name, r);
2305 					return r;
2306 				}
2307 			} else {
2308 				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2309 				if (r) {
2310 					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2311 							  adev->ip_blocks[i].version->funcs->name, r);
2312 					return r;
2313 				}
2314 			}
2315 
2316 			adev->ip_blocks[i].status.hw = true;
2317 			break;
2318 		}
2319 	}
2320 
2321 	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2322 		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2323 
2324 	return r;
2325 }
2326 
2327 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2328 {
2329 	long timeout;
2330 	int r, i;
2331 
2332 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2333 		struct amdgpu_ring *ring = adev->rings[i];
2334 
2335 		/* No need to set up the GPU scheduler for rings that don't need it */
2336 		if (!ring || ring->no_scheduler)
2337 			continue;
2338 
2339 		switch (ring->funcs->type) {
2340 		case AMDGPU_RING_TYPE_GFX:
2341 			timeout = adev->gfx_timeout;
2342 			break;
2343 		case AMDGPU_RING_TYPE_COMPUTE:
2344 			timeout = adev->compute_timeout;
2345 			break;
2346 		case AMDGPU_RING_TYPE_SDMA:
2347 			timeout = adev->sdma_timeout;
2348 			break;
2349 		default:
2350 			timeout = adev->video_timeout;
2351 			break;
2352 		}
2353 
2354 		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2355 				   ring->num_hw_submission, amdgpu_job_hang_limit,
2356 				   timeout, adev->reset_domain->wq,
2357 				   ring->sched_score, ring->name,
2358 				   adev->dev);
2359 		if (r) {
2360 			DRM_ERROR("Failed to create scheduler on ring %s.\n",
2361 				  ring->name);
2362 			return r;
2363 		}
2364 	}
2365 
2366 	return 0;
2367 }
2368 
2369 
2370 /**
2371  * amdgpu_device_ip_init - run init for hardware IPs
2372  *
2373  * @adev: amdgpu_device pointer
2374  *
2375  * Main initialization pass for hardware IPs.  The list of all the hardware
2376  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2377  * are run.  sw_init initializes the software state associated with each IP
2378  * and hw_init initializes the hardware associated with each IP.
2379  * Returns 0 on success, negative error code on failure.
2380  */
2381 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2382 {
2383 	int i, r;
2384 
2385 	r = amdgpu_ras_init(adev);
2386 	if (r)
2387 		return r;
2388 
2389 	for (i = 0; i < adev->num_ip_blocks; i++) {
2390 		if (!adev->ip_blocks[i].status.valid)
2391 			continue;
2392 		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2393 		if (r) {
2394 			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2395 				  adev->ip_blocks[i].version->funcs->name, r);
2396 			goto init_failed;
2397 		}
2398 		adev->ip_blocks[i].status.sw = true;
2399 
2400 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2401 			/* need to do common hw init early so everything is set up for gmc */
2402 			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2403 			if (r) {
2404 				DRM_ERROR("hw_init %d failed %d\n", i, r);
2405 				goto init_failed;
2406 			}
2407 			adev->ip_blocks[i].status.hw = true;
2408 		} else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2409 			/* need to do gmc hw init early so we can allocate gpu mem */
2410 			/* Try to reserve bad pages early */
2411 			if (amdgpu_sriov_vf(adev))
2412 				amdgpu_virt_exchange_data(adev);
2413 
2414 			r = amdgpu_device_vram_scratch_init(adev);
2415 			if (r) {
2416 				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2417 				goto init_failed;
2418 			}
2419 			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2420 			if (r) {
2421 				DRM_ERROR("hw_init %d failed %d\n", i, r);
2422 				goto init_failed;
2423 			}
2424 			r = amdgpu_device_wb_init(adev);
2425 			if (r) {
2426 				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2427 				goto init_failed;
2428 			}
2429 			adev->ip_blocks[i].status.hw = true;
2430 
2431 			/* right after GMC hw init, we create CSA */
2432 			if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2433 				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2434 								AMDGPU_GEM_DOMAIN_VRAM,
2435 								AMDGPU_CSA_SIZE);
2436 				if (r) {
2437 					DRM_ERROR("allocate CSA failed %d\n", r);
2438 					goto init_failed;
2439 				}
2440 			}
2441 		}
2442 	}
2443 
2444 	if (amdgpu_sriov_vf(adev))
2445 		amdgpu_virt_init_data_exchange(adev);
2446 
2447 	r = amdgpu_ib_pool_init(adev);
2448 	if (r) {
2449 		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2450 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2451 		goto init_failed;
2452 	}
2453 
2454 	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init completes */
2455 	if (r)
2456 		goto init_failed;
2457 
2458 	r = amdgpu_device_ip_hw_init_phase1(adev);
2459 	if (r)
2460 		goto init_failed;
2461 
2462 	r = amdgpu_device_fw_loading(adev);
2463 	if (r)
2464 		goto init_failed;
2465 
2466 	r = amdgpu_device_ip_hw_init_phase2(adev);
2467 	if (r)
2468 		goto init_failed;
2469 
2470 	/*
2471 	 * Retired pages will be loaded from eeprom and reserved here.
2472 	 * This should be called after amdgpu_device_ip_hw_init_phase2() since
2473 	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
2474 	 * functional for I2C communication, which is only true at this point.
2475 	 *
2476 	 * amdgpu_ras_recovery_init may fail, but the upper layers only care
2477 	 * about failures caused by a bad gpu situation and stop the amdgpu
2478 	 * init process accordingly. For other failures it still releases all
2479 	 * the resources and prints an error message rather than returning a
2480 	 * negative value to the caller.
2481 	 *
2482 	 * Note: theoretically, this should be called before all vram allocations
2483 	 * to protect retired pages from being abused.
2484 	 */
2485 	r = amdgpu_ras_recovery_init(adev);
2486 	if (r)
2487 		goto init_failed;
2488 
2489 	/*
2490 	 * In case of XGMI, grab an extra reference on the reset domain for this device.
2491 	 */
2492 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2493 		if (amdgpu_xgmi_add_device(adev) == 0) {
2494 			if (!amdgpu_sriov_vf(adev)) {
2495 				struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2496 
2497 				if (WARN_ON(!hive)) {
2498 					r = -ENOENT;
2499 					goto init_failed;
2500 				}
2501 
2502 				if (!hive->reset_domain ||
2503 				    !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2504 					r = -ENOENT;
2505 					amdgpu_put_xgmi_hive(hive);
2506 					goto init_failed;
2507 				}
2508 
2509 				/* Drop the early temporary reset domain we created for device */
2510 				amdgpu_reset_put_reset_domain(adev->reset_domain);
2511 				adev->reset_domain = hive->reset_domain;
2512 				amdgpu_put_xgmi_hive(hive);
2513 			}
2514 		}
2515 	}
2516 
2517 	r = amdgpu_device_init_schedulers(adev);
2518 	if (r)
2519 		goto init_failed;
2520 
2521 	/* Don't init kfd if whole hive need to be reset during init */
2522 	if (!adev->gmc.xgmi.pending_reset)
2523 		amdgpu_amdkfd_device_init(adev);
2524 
2525 	amdgpu_fru_get_product_info(adev);
2526 
2527 init_failed:
2528 	if (amdgpu_sriov_vf(adev))
2529 		amdgpu_virt_release_full_gpu(adev, true);
2530 
2531 	return r;
2532 }
2533 
2534 /**
2535  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2536  *
2537  * @adev: amdgpu_device pointer
2538  *
2539  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2540  * this function before a GPU reset.  If the value is retained after a
2541  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2542  */
2543 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2544 {
2545 	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2546 }
2547 
2548 /**
2549  * amdgpu_device_check_vram_lost - check if vram is valid
2550  *
2551  * @adev: amdgpu_device pointer
2552  *
2553  * Checks the reset magic value written to the gart pointer in VRAM.
2554  * The driver calls this after a GPU reset to see if the contents of
2555  * VRAM are lost or not.
2556  * Returns true if vram is lost, false if not.
2557  */
2558 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2559 {
2560 	if (memcmp(adev->gart.ptr, adev->reset_magic,
2561 			AMDGPU_RESET_MAGIC_NUM))
2562 		return true;
2563 
2564 	if (!amdgpu_in_reset(adev))
2565 		return false;
2566 
2567 	/*
2568 	 * For all ASICs with baco/mode1 reset, the VRAM is
2569 	 * always assumed to be lost.
2570 	 */
2571 	switch (amdgpu_asic_reset_method(adev)) {
2572 	case AMD_RESET_METHOD_BACO:
2573 	case AMD_RESET_METHOD_MODE1:
2574 		return true;
2575 	default:
2576 		return false;
2577 	}
2578 }
2579 
2580 /**
2581  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2582  *
2583  * @adev: amdgpu_device pointer
2584  * @state: clockgating state (gate or ungate)
2585  *
2586  * The list of all the hardware IPs that make up the asic is walked and the
2587  * set_clockgating_state callbacks are run.
2588  * During the late initialization pass clockgating is enabled for the
2589  * hardware IPs; during the fini or suspend pass it is disabled.
2590  * Returns 0 on success, negative error code on failure.
2591  */
2592 
2593 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2594 			       enum amd_clockgating_state state)
2595 {
2596 	int i, j, r;
2597 
2598 	if (amdgpu_emu_mode == 1)
2599 		return 0;
2600 
2601 	for (j = 0; j < adev->num_ip_blocks; j++) {
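		/* gate in the order the IPs were initialized, ungate in reverse */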
2602 		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2603 		if (!adev->ip_blocks[i].status.late_initialized)
2604 			continue;
2605 		/* skip CG for GFX on S0ix */
2606 		if (adev->in_s0ix &&
2607 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2608 			continue;
2609 		/* skip CG for VCE/UVD, it's handled specially */
2610 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2611 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2612 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2613 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2614 		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2615 			/* enable clockgating to save power */
2616 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2617 										     state);
2618 			if (r) {
2619 				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2620 					  adev->ip_blocks[i].version->funcs->name, r);
2621 				return r;
2622 			}
2623 		}
2624 	}
2625 
2626 	return 0;
2627 }
2628 
2629 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2630 			       enum amd_powergating_state state)
2631 {
2632 	int i, j, r;
2633 
2634 	if (amdgpu_emu_mode == 1)
2635 		return 0;
2636 
2637 	for (j = 0; j < adev->num_ip_blocks; j++) {
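		/* as with clockgating: gate in init order, ungate in reverse */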
2638 		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2639 		if (!adev->ip_blocks[i].status.late_initialized)
2640 			continue;
2641 		/* skip PG for GFX on S0ix */
2642 		if (adev->in_s0ix &&
2643 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2644 			continue;
2645 		/* skip PG for VCE/UVD, it's handled specially */
2646 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2647 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2648 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2649 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2650 		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
2651 			/* enable powergating to save power */
2652 			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2653 											state);
2654 			if (r) {
2655 				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2656 					  adev->ip_blocks[i].version->funcs->name, r);
2657 				return r;
2658 			}
2659 		}
2660 	}
2661 	return 0;
2662 }
2663 
2664 static int amdgpu_device_enable_mgpu_fan_boost(void)
2665 {
2666 	struct amdgpu_gpu_instance *gpu_ins;
2667 	struct amdgpu_device *adev;
2668 	int i, ret = 0;
2669 
2670 	mutex_lock(&mgpu_info.mutex);
2671 
2672 	/*
2673 	 * MGPU fan boost feature should be enabled
2674 	 * only when there are two or more dGPUs in
2675 	 * the system
2676 	 */
2677 	if (mgpu_info.num_dgpu < 2)
2678 		goto out;
2679 
2680 	for (i = 0; i < mgpu_info.num_dgpu; i++) {
2681 		gpu_ins = &(mgpu_info.gpu_ins[i]);
2682 		adev = gpu_ins->adev;
2683 		if (!(adev->flags & AMD_IS_APU) &&
2684 		    !gpu_ins->mgpu_fan_enabled) {
2685 			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2686 			if (ret)
2687 				break;
2688 
2689 			gpu_ins->mgpu_fan_enabled = 1;
2690 		}
2691 	}
2692 
2693 out:
2694 	mutex_unlock(&mgpu_info.mutex);
2695 
2696 	return ret;
2697 }
2698 
2699 /**
2700  * amdgpu_device_ip_late_init - run late init for hardware IPs
2701  *
2702  * @adev: amdgpu_device pointer
2703  *
2704  * Late initialization pass for hardware IPs.  The list of all the hardware
2705  * IPs that make up the asic is walked and the late_init callbacks are run.
2706  * late_init covers any special initialization that an IP requires
2707  * after all of the IPs have been initialized or something that needs to happen
2708  * late in the init process.
2709  * Returns 0 on success, negative error code on failure.
2710  */
2711 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2712 {
2713 	struct amdgpu_gpu_instance *gpu_instance;
2714 	int i = 0, r;
2715 
2716 	for (i = 0; i < adev->num_ip_blocks; i++) {
2717 		if (!adev->ip_blocks[i].status.hw)
2718 			continue;
2719 		if (adev->ip_blocks[i].version->funcs->late_init) {
2720 			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2721 			if (r) {
2722 				DRM_ERROR("late_init of IP block <%s> failed %d\n",
2723 					  adev->ip_blocks[i].version->funcs->name, r);
2724 				return r;
2725 			}
2726 		}
2727 		adev->ip_blocks[i].status.late_initialized = true;
2728 	}
2729 
2730 	r = amdgpu_ras_late_init(adev);
2731 	if (r) {
2732 		DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2733 		return r;
2734 	}
2735 
2736 	amdgpu_ras_set_error_query_ready(adev, true);
2737 
2738 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2739 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2740 
2741 	amdgpu_device_fill_reset_magic(adev);
2742 
2743 	r = amdgpu_device_enable_mgpu_fan_boost();
2744 	if (r)
2745 		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2746 
2747 	/* For passthrough configurations on arcturus and aldebaran, enable special SBR handling */
2748 	if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2749 			       adev->asic_type == CHIP_ALDEBARAN))
2750 		amdgpu_dpm_handle_passthrough_sbr(adev, true);
2751 
2752 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2753 		mutex_lock(&mgpu_info.mutex);
2754 
2755 		/*
2756 		 * Reset the device p-state to low since it booted in the high state.
2757 		 *
2758 		 * This should be performed only after all devices from the same
2759 		 * hive have been initialized.
2760 		 *
2761 		 * However, the number of devices in the hive is not known in advance;
2762 		 * it is counted one by one as the devices are initialized.
2763 		 *
2764 		 * So we wait until all XGMI-interlinked devices are initialized.
2765 		 * This may add some delay since those devices may come from
2766 		 * different hives, but that should be OK.
2767 		 */
2768 		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2769 			for (i = 0; i < mgpu_info.num_gpu; i++) {
2770 				gpu_instance = &(mgpu_info.gpu_ins[i]);
2771 				if (gpu_instance->adev->flags & AMD_IS_APU)
2772 					continue;
2773 
2774 				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2775 						AMDGPU_XGMI_PSTATE_MIN);
2776 				if (r) {
2777 					DRM_ERROR("pstate setting failed (%d).\n", r);
2778 					break;
2779 				}
2780 			}
2781 		}
2782 
2783 		mutex_unlock(&mgpu_info.mutex);
2784 	}
2785 
2786 	return 0;
2787 }
2788 
2789 /**
2790  * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2791  *
2792  * @adev: amdgpu_device pointer
2793  *
2794  * For ASICs that need to disable the SMC first
2795  */
2796 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2797 {
2798 	int i, r;
2799 
2800 	if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2801 		return;
2802 
2803 	for (i = 0; i < adev->num_ip_blocks; i++) {
2804 		if (!adev->ip_blocks[i].status.hw)
2805 			continue;
2806 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2807 			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2808 			/* XXX handle errors */
2809 			if (r) {
2810 				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2811 					  adev->ip_blocks[i].version->funcs->name, r);
2812 			}
2813 			adev->ip_blocks[i].status.hw = false;
2814 			break;
2815 		}
2816 	}
2817 }
2818 
2819 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2820 {
2821 	int i, r;
2822 
2823 	for (i = 0; i < adev->num_ip_blocks; i++) {
2824 		if (!adev->ip_blocks[i].version->funcs->early_fini)
2825 			continue;
2826 
2827 		r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2828 		if (r) {
2829 			DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2830 				  adev->ip_blocks[i].version->funcs->name, r);
2831 		}
2832 	}
2833 
2834 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2835 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2836 
2837 	amdgpu_amdkfd_suspend(adev, false);
2838 
2839 	/* Workaround for ASICs that need to disable the SMC first */
2840 	amdgpu_device_smu_fini_early(adev);
2841 
2842 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2843 		if (!adev->ip_blocks[i].status.hw)
2844 			continue;
2845 
2846 		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2847 		/* XXX handle errors */
2848 		if (r) {
2849 			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2850 				  adev->ip_blocks[i].version->funcs->name, r);
2851 		}
2852 
2853 		adev->ip_blocks[i].status.hw = false;
2854 	}
2855 
2856 	if (amdgpu_sriov_vf(adev)) {
2857 		if (amdgpu_virt_release_full_gpu(adev, false))
2858 			DRM_ERROR("failed to release exclusive mode on fini\n");
2859 	}
2860 
2861 	return 0;
2862 }
2863 
2864 /**
2865  * amdgpu_device_ip_fini - run fini for hardware IPs
2866  *
2867  * @adev: amdgpu_device pointer
2868  *
2869  * Main teardown pass for hardware IPs.  The list of all the hardware
2870  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2871  * are run.  hw_fini tears down the hardware associated with each IP
2872  * and sw_fini tears down any software state associated with each IP.
2873  * Returns 0 on success, negative error code on failure.
2874  */
2875 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2876 {
2877 	int i, r;
2878 
2879 	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2880 		amdgpu_virt_release_ras_err_handler_data(adev);
2881 
2882 	if (adev->gmc.xgmi.num_physical_nodes > 1)
2883 		amdgpu_xgmi_remove_device(adev);
2884 
2885 	amdgpu_amdkfd_device_fini_sw(adev);
2886 
2887 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2888 		if (!adev->ip_blocks[i].status.sw)
2889 			continue;
2890 
2891 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2892 			amdgpu_ucode_free_bo(adev);
2893 			amdgpu_free_static_csa(&adev->virt.csa_obj);
2894 			amdgpu_device_wb_fini(adev);
2895 			amdgpu_device_vram_scratch_fini(adev);
2896 			amdgpu_ib_pool_fini(adev);
2897 		}
2898 
2899 		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2900 		/* XXX handle errors */
2901 		if (r) {
2902 			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2903 				  adev->ip_blocks[i].version->funcs->name, r);
2904 		}
2905 		adev->ip_blocks[i].status.sw = false;
2906 		adev->ip_blocks[i].status.valid = false;
2907 	}
2908 
2909 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2910 		if (!adev->ip_blocks[i].status.late_initialized)
2911 			continue;
2912 		if (adev->ip_blocks[i].version->funcs->late_fini)
2913 			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2914 		adev->ip_blocks[i].status.late_initialized = false;
2915 	}
2916 
2917 	amdgpu_ras_fini(adev);
2918 
2919 	return 0;
2920 }
2921 
2922 /**
2923  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2924  *
2925  * @work: work_struct.
2926  */
2927 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2928 {
2929 	struct amdgpu_device *adev =
2930 		container_of(work, struct amdgpu_device, delayed_init_work.work);
2931 	int r;
2932 
2933 	r = amdgpu_ib_ring_tests(adev);
2934 	if (r)
2935 		DRM_ERROR("ib ring test failed (%d).\n", r);
2936 }
2937 
2938 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2939 {
2940 	struct amdgpu_device *adev =
2941 		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2942 
2943 	WARN_ON_ONCE(adev->gfx.gfx_off_state);
2944 	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2945 
2946 	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2947 		adev->gfx.gfx_off_state = true;
2948 }
2949 
2950 /**
2951  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2952  *
2953  * @adev: amdgpu_device pointer
2954  *
2955  * Main suspend function for hardware IPs.  The list of all the hardware
2956  * IPs that make up the asic is walked, clockgating is disabled and the
2957  * suspend callbacks are run.  suspend puts the hardware and software state
2958  * in each IP into a state suitable for suspend.
2959  * Returns 0 on success, negative error code on failure.
2960  */
2961 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2962 {
2963 	int i, r;
2964 
2965 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2966 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2967 
2968 	/*
2969 	 * Per the PMFW team's suggestion, the driver needs to handle disabling
2970 	 * the gfxoff and df cstate features in the gpu reset (e.g. Mode1Reset)
2971 	 * scenario. Add the missing df cstate disablement here.
2972 	 */
2973 	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
2974 		dev_warn(adev->dev, "Failed to disallow df cstate");
2975 
2976 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2977 		if (!adev->ip_blocks[i].status.valid)
2978 			continue;
2979 
2980 		/* displays are handled separately */
2981 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2982 			continue;
2983 
2984 		/* XXX handle errors */
2985 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
2986 		/* XXX handle errors */
2987 		if (r) {
2988 			DRM_ERROR("suspend of IP block <%s> failed %d\n",
2989 				  adev->ip_blocks[i].version->funcs->name, r);
2990 			return r;
2991 		}
2992 
2993 		adev->ip_blocks[i].status.hw = false;
2994 	}
2995 
2996 	return 0;
2997 }
2998 
2999 /**
3000  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
3001  *
3002  * @adev: amdgpu_device pointer
3003  *
3004  * Main suspend function for hardware IPs.  The list of all the hardware
3005  * IPs that make up the asic is walked, clockgating is disabled and the
3006  * suspend callbacks are run.  suspend puts the hardware and software state
3007  * in each IP into a state suitable for suspend.
3008  * Returns 0 on success, negative error code on failure.
3009  */
3010 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
3011 {
3012 	int i, r;
3013 
3014 	if (adev->in_s0ix)
3015 		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
3016 
3017 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3018 		if (!adev->ip_blocks[i].status.valid)
3019 			continue;
3020 		/* displays are handled in phase1 */
3021 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3022 			continue;
3023 		/* PSP lost connection when err_event_athub occurs */
3024 		if (amdgpu_ras_intr_triggered() &&
3025 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3026 			adev->ip_blocks[i].status.hw = false;
3027 			continue;
3028 		}
3029 
3030 		/* skip unnecessary suspend if we have not initialized them yet */
3031 		if (adev->gmc.xgmi.pending_reset &&
3032 		    !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3033 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
3034 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3035 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
3036 			adev->ip_blocks[i].status.hw = false;
3037 			continue;
3038 		}
3039 
3040 		/* skip suspend of gfx and psp for S0ix
3041 		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
3042 		 * like at runtime. PSP is also part of the always-on hardware,
3043 		 * so there is no need to suspend it.
3044 		 */
3045 		if (adev->in_s0ix &&
3046 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3047 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
3048 			continue;
3049 
3050 		/* XXX handle errors */
3051 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
3052 		/* XXX handle errors */
3053 		if (r) {
3054 			DRM_ERROR("suspend of IP block <%s> failed %d\n",
3055 				  adev->ip_blocks[i].version->funcs->name, r);
3056 		}
3057 		adev->ip_blocks[i].status.hw = false;
3058 		/* handle putting the SMC in the appropriate state */
3059 		if (!amdgpu_sriov_vf(adev)) {
3060 			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3061 				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3062 				if (r) {
3063 					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3064 							adev->mp1_state, r);
3065 					return r;
3066 				}
3067 			}
3068 		}
3069 	}
3070 
3071 	return 0;
3072 }
3073 
3074 /**
3075  * amdgpu_device_ip_suspend - run suspend for hardware IPs
3076  *
3077  * @adev: amdgpu_device pointer
3078  *
3079  * Main suspend function for hardware IPs.  The list of all the hardware
3080  * IPs that make up the asic is walked, clockgating is disabled and the
3081  * suspend callbacks are run.  suspend puts the hardware and software state
3082  * in each IP into a state suitable for suspend.
3083  * Returns 0 on success, negative error code on failure.
3084  */
3085 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3086 {
3087 	int r;
3088 
3089 	if (amdgpu_sriov_vf(adev)) {
3090 		amdgpu_virt_fini_data_exchange(adev);
3091 		amdgpu_virt_request_full_gpu(adev, false);
3092 	}
3093 
3094 	r = amdgpu_device_ip_suspend_phase1(adev);
3095 	if (r)
3096 		return r;
3097 	r = amdgpu_device_ip_suspend_phase2(adev);
3098 
3099 	if (amdgpu_sriov_vf(adev))
3100 		amdgpu_virt_release_full_gpu(adev, false);
3101 
3102 	return r;
3103 }
3104 
3105 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3106 {
3107 	int i, r;
3108 
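	/* only the blocks listed here are brought back up in the early SR-IOV re-init */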
3109 	static enum amd_ip_block_type ip_order[] = {
3110 		AMD_IP_BLOCK_TYPE_COMMON,
3111 		AMD_IP_BLOCK_TYPE_GMC,
3112 		AMD_IP_BLOCK_TYPE_PSP,
3113 		AMD_IP_BLOCK_TYPE_IH,
3114 	};
3115 
3116 	for (i = 0; i < adev->num_ip_blocks; i++) {
3117 		int j;
3118 		struct amdgpu_ip_block *block;
3119 
3120 		block = &adev->ip_blocks[i];
3121 		block->status.hw = false;
3122 
3123 		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3124 
3125 			if (block->version->type != ip_order[j] ||
3126 				!block->status.valid)
3127 				continue;
3128 
3129 			r = block->version->funcs->hw_init(adev);
3130 			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3131 			if (r)
3132 				return r;
3133 			block->status.hw = true;
3134 		}
3135 	}
3136 
3137 	return 0;
3138 }
3139 
3140 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3141 {
3142 	int i, r;
3143 
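	/* the remaining blocks are brought back up in this fixed order */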
3144 	static enum amd_ip_block_type ip_order[] = {
3145 		AMD_IP_BLOCK_TYPE_SMC,
3146 		AMD_IP_BLOCK_TYPE_DCE,
3147 		AMD_IP_BLOCK_TYPE_GFX,
3148 		AMD_IP_BLOCK_TYPE_SDMA,
3149 		AMD_IP_BLOCK_TYPE_UVD,
3150 		AMD_IP_BLOCK_TYPE_VCE,
3151 		AMD_IP_BLOCK_TYPE_VCN
3152 	};
3153 
3154 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3155 		int j;
3156 		struct amdgpu_ip_block *block;
3157 
3158 		for (j = 0; j < adev->num_ip_blocks; j++) {
3159 			block = &adev->ip_blocks[j];
3160 
3161 			if (block->version->type != ip_order[i] ||
3162 				!block->status.valid ||
3163 				block->status.hw)
3164 				continue;
3165 
3166 			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3167 				r = block->version->funcs->resume(adev);
3168 			else
3169 				r = block->version->funcs->hw_init(adev);
3170 
3171 			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3172 			if (r)
3173 				return r;
3174 			block->status.hw = true;
3175 		}
3176 	}
3177 
3178 	return 0;
3179 }
3180 
3181 /**
3182  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3183  *
3184  * @adev: amdgpu_device pointer
3185  *
3186  * First resume function for hardware IPs.  The list of all the hardware
3187  * IPs that make up the asic is walked and the resume callbacks are run for
3188  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3189  * after a suspend and updates the software state as necessary.  This
3190  * function is also used for restoring the GPU after a GPU reset.
3191  * Returns 0 on success, negative error code on failure.
3192  */
3193 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3194 {
3195 	int i, r;
3196 
3197 	for (i = 0; i < adev->num_ip_blocks; i++) {
3198 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3199 			continue;
3200 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3201 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3202 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3203 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3204 
3205 			r = adev->ip_blocks[i].version->funcs->resume(adev);
3206 			if (r) {
3207 				DRM_ERROR("resume of IP block <%s> failed %d\n",
3208 					  adev->ip_blocks[i].version->funcs->name, r);
3209 				return r;
3210 			}
3211 			adev->ip_blocks[i].status.hw = true;
3212 		}
3213 	}
3214 
3215 	return 0;
3216 }
3217 
3218 /**
3219  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3220  *
3221  * @adev: amdgpu_device pointer
3222  *
3223  * Second resume function for hardware IPs.  The list of all the hardware
3224  * IPs that make up the asic is walked and the resume callbacks are run for
3225  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3226  * functional state after a suspend and updates the software state as
3227  * necessary.  This function is also used for restoring the GPU after a GPU
3228  * reset.
3229  * Returns 0 on success, negative error code on failure.
3230  */
3231 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3232 {
3233 	int i, r;
3234 
3235 	for (i = 0; i < adev->num_ip_blocks; i++) {
3236 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3237 			continue;
3238 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3239 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3240 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3241 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3242 			continue;
3243 		r = adev->ip_blocks[i].version->funcs->resume(adev);
3244 		if (r) {
3245 			DRM_ERROR("resume of IP block <%s> failed %d\n",
3246 				  adev->ip_blocks[i].version->funcs->name, r);
3247 			return r;
3248 		}
3249 		adev->ip_blocks[i].status.hw = true;
3250 
3251 		if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3252 			/* disable gfxoff for IP resume. The gfxoff will be re-enabled in
3253 			 * amdgpu_device_resume() after IP resume.
3254 			 */
3255 			amdgpu_gfx_off_ctrl(adev, false);
3256 			DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n");
3257 		}
3258 
3259 	}
3260 
3261 	return 0;
3262 }
3263 
3264 /**
3265  * amdgpu_device_ip_resume - run resume for hardware IPs
3266  *
3267  * @adev: amdgpu_device pointer
3268  *
3269  * Main resume function for hardware IPs.  The hardware IPs
3270  * are split into two resume functions because they are
3271  * also used in recovering from a GPU reset and some additional
3272  * steps need to be taken between them.  In this case (S3/S4) they are
3273  * run sequentially.
3274  * Returns 0 on success, negative error code on failure.
3275  */
3276 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3277 {
3278 	int r;
3279 
3280 	r = amdgpu_amdkfd_resume_iommu(adev);
3281 	if (r)
3282 		return r;
3283 
3284 	r = amdgpu_device_ip_resume_phase1(adev);
3285 	if (r)
3286 		return r;
3287 
3288 	r = amdgpu_device_fw_loading(adev);
3289 	if (r)
3290 		return r;
3291 
3292 	r = amdgpu_device_ip_resume_phase2(adev);
3293 
3294 	return r;
3295 }
3296 
3297 /**
3298  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3299  *
3300  * @adev: amdgpu_device pointer
3301  *
3302  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3303  */
3304 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3305 {
3306 	if (amdgpu_sriov_vf(adev)) {
3307 		if (adev->is_atom_fw) {
3308 			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3309 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3310 		} else {
3311 			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3312 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3313 		}
3314 
3315 		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3316 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3317 	}
3318 }
3319 
3320 /**
3321  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3322  *
3323  * @asic_type: AMD asic type
3324  *
3325  * Check if there is DC (new modesetting infrastructure) support for an asic.
3326  * Returns true if DC has support, false if not.
3327  */
3328 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3329 {
3330 	switch (asic_type) {
3331 #ifdef CONFIG_DRM_AMDGPU_SI
3332 	case CHIP_HAINAN:
3333 #endif
3334 	case CHIP_TOPAZ:
3335 		/* chips with no display hardware */
3336 		return false;
3337 #if defined(CONFIG_DRM_AMD_DC)
3338 	case CHIP_TAHITI:
3339 	case CHIP_PITCAIRN:
3340 	case CHIP_VERDE:
3341 	case CHIP_OLAND:
3342 		/*
3343 		 * We have systems in the wild with these ASICs that require
3344 		 * LVDS and VGA support which is not supported with DC.
3345 		 *
3346 		 * Fall back to the non-DC driver here by default so as not to
3347 		 * cause regressions.
3348 		 */
3349 #if defined(CONFIG_DRM_AMD_DC_SI)
3350 		return amdgpu_dc > 0;
3351 #else
3352 		return false;
3353 #endif
3354 	case CHIP_BONAIRE:
3355 	case CHIP_KAVERI:
3356 	case CHIP_KABINI:
3357 	case CHIP_MULLINS:
3358 		/*
3359 		 * We have systems in the wild with these ASICs that require
3360 		 * VGA support which is not supported with DC.
3361 		 *
3362 		 * Fall back to the non-DC driver here by default so as not to
3363 		 * cause regressions.
3364 		 */
3365 		return amdgpu_dc > 0;
3366 	default:
3367 		return amdgpu_dc != 0;
3368 #else
3369 	default:
3370 		if (amdgpu_dc > 0)
3371 			DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3372 					 "but isn't supported by ASIC, ignoring\n");
3373 		return false;
3374 #endif
3375 	}
3376 }
3377 
3378 /**
3379  * amdgpu_device_has_dc_support - check if dc is supported
3380  *
3381  * @adev: amdgpu_device pointer
3382  *
3383  * Returns true for supported, false for not supported
3384  */
3385 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3386 {
3387 	if (amdgpu_sriov_vf(adev) ||
3388 	    adev->enable_virtual_display ||
3389 	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3390 		return false;
3391 
3392 	return amdgpu_device_asic_has_dc_support(adev->asic_type);
3393 }
3394 
3395 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3396 {
3397 	struct amdgpu_device *adev =
3398 		container_of(__work, struct amdgpu_device, xgmi_reset_work);
3399 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3400 
3401 	/* It's a bug to not have a hive within this function */
3402 	if (WARN_ON(!hive))
3403 		return;
3404 
3405 	/*
3406 	 * Use task barrier to synchronize all xgmi reset works across the
3407 	 * hive. task_barrier_enter and task_barrier_exit will block
3408 	 * until all the threads running the xgmi reset works reach
3409 	 * those points. task_barrier_full will do both blocks.
3410 	 */
3411 	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3412 
3413 		task_barrier_enter(&hive->tb);
3414 		adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3415 
3416 		if (adev->asic_reset_res)
3417 			goto fail;
3418 
3419 		task_barrier_exit(&hive->tb);
3420 		adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3421 
3422 		if (adev->asic_reset_res)
3423 			goto fail;
3424 
3425 		if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3426 		    adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3427 			adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3428 	} else {
3429 
3430 		task_barrier_full(&hive->tb);
3431 		adev->asic_reset_res =  amdgpu_asic_reset(adev);
3432 	}
3433 
3434 fail:
3435 	if (adev->asic_reset_res)
3436 		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3437 			 adev->asic_reset_res, adev_to_drm(adev)->unique);
3438 	amdgpu_put_xgmi_hive(hive);
3439 }
3440 
3441 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3442 {
3443 	char *input = amdgpu_lockup_timeout;
3444 	char *timeout_setting = NULL;
3445 	int index = 0;
3446 	long timeout;
3447 	int ret = 0;
3448 
3449 	/*
3450 	 * By default the timeout for non-compute jobs is 10000 ms
3451 	 * and 60000 ms for compute jobs.
3452 	 * In SR-IOV or passthrough mode, the timeout for compute
3453 	 * jobs is 60000 ms by default.
3454 	 */
3455 	adev->gfx_timeout = msecs_to_jiffies(10000);
3456 	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3457 	if (amdgpu_sriov_vf(adev))
3458 		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3459 					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3460 	else
3461 		adev->compute_timeout =  msecs_to_jiffies(60000);
3462 
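	/*
	 * The amdgpu_lockup_timeout module parameter is a comma separated
	 * list applied in the order gfx, compute, sdma, video, e.g.
	 * "10000,60000,10000,10000"; a single value applies to all
	 * non-compute queues (and also to compute under SR-IOV or passthrough).
	 */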
3463 #ifdef notyet
3464 	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3465 		while ((timeout_setting = strsep(&input, ",")) &&
3466 				strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3467 			ret = kstrtol(timeout_setting, 0, &timeout);
3468 			if (ret)
3469 				return ret;
3470 
3471 			if (timeout == 0) {
3472 				index++;
3473 				continue;
3474 			} else if (timeout < 0) {
3475 				timeout = MAX_SCHEDULE_TIMEOUT;
3476 				dev_warn(adev->dev, "lockup timeout disabled");
3477 				add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3478 			} else {
3479 				timeout = msecs_to_jiffies(timeout);
3480 			}
3481 
3482 			switch (index++) {
3483 			case 0:
3484 				adev->gfx_timeout = timeout;
3485 				break;
3486 			case 1:
3487 				adev->compute_timeout = timeout;
3488 				break;
3489 			case 2:
3490 				adev->sdma_timeout = timeout;
3491 				break;
3492 			case 3:
3493 				adev->video_timeout = timeout;
3494 				break;
3495 			default:
3496 				break;
3497 			}
3498 		}
3499 		/*
3500 		 * There is only one value specified and
3501 		 * it should apply to all non-compute jobs.
3502 		 */
3503 		if (index == 1) {
3504 			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3505 			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3506 				adev->compute_timeout = adev->gfx_timeout;
3507 		}
3508 	}
3509 #endif
3510 
3511 	return ret;
3512 }
3513 
3514 /**
3515  * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3516  *
3517  * @adev: amdgpu_device pointer
3518  *
3519  * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
3520  */
3521 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3522 {
3523 #ifdef notyet
3524 	struct iommu_domain *domain;
3525 
3526 	domain = iommu_get_domain_for_dev(adev->dev);
3527 	if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3528 #endif
3529 		adev->ram_is_direct_mapped = true;
3530 }
3531 
3532 static const struct attribute *amdgpu_dev_attributes[] = {
3533 	&dev_attr_product_name.attr,
3534 	&dev_attr_product_number.attr,
3535 	&dev_attr_serial_number.attr,
3536 	&dev_attr_pcie_replay_count.attr,
3537 	NULL
3538 };
3539 
3540 /**
3541  * amdgpu_device_init - initialize the driver
3542  *
3543  * @adev: amdgpu_device pointer
3544  * @flags: driver flags
3545  *
3546  * Initializes the driver info and hw (all asics).
3547  * Returns 0 for success or an error on failure.
3548  * Called at driver startup.
3549  */
3550 int amdgpu_device_init(struct amdgpu_device *adev,
3551 		       uint32_t flags)
3552 {
3553 	struct drm_device *ddev = adev_to_drm(adev);
3554 	struct pci_dev *pdev = adev->pdev;
3555 	int r, i;
3556 	bool px = false;
3557 	u32 max_MBps;
3558 
3559 	adev->shutdown = false;
3560 	adev->flags = flags;
3561 
3562 	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3563 		adev->asic_type = amdgpu_force_asic_type;
3564 	else
3565 		adev->asic_type = flags & AMD_ASIC_MASK;
3566 
3567 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3568 	if (amdgpu_emu_mode == 1)
3569 		adev->usec_timeout *= 10;
3570 	adev->gmc.gart_size = 512 * 1024 * 1024;
3571 	adev->accel_working = false;
3572 	adev->num_rings = 0;
3573 	RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3574 	adev->mman.buffer_funcs = NULL;
3575 	adev->mman.buffer_funcs_ring = NULL;
3576 	adev->vm_manager.vm_pte_funcs = NULL;
3577 	adev->vm_manager.vm_pte_num_scheds = 0;
3578 	adev->gmc.gmc_funcs = NULL;
3579 	adev->harvest_ip_mask = 0x0;
3580 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3581 	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3582 
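	/*
	 * Default all indirect register accessors to stubs that complain if
	 * ever called; the ASIC specific code installs the real callbacks.
	 */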
3583 	adev->smc_rreg = &amdgpu_invalid_rreg;
3584 	adev->smc_wreg = &amdgpu_invalid_wreg;
3585 	adev->pcie_rreg = &amdgpu_invalid_rreg;
3586 	adev->pcie_wreg = &amdgpu_invalid_wreg;
3587 	adev->pciep_rreg = &amdgpu_invalid_rreg;
3588 	adev->pciep_wreg = &amdgpu_invalid_wreg;
3589 	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3590 	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3591 	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3592 	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3593 	adev->didt_rreg = &amdgpu_invalid_rreg;
3594 	adev->didt_wreg = &amdgpu_invalid_wreg;
3595 	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3596 	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3597 	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3598 	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3599 
3600 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3601 		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3602 		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3603 
3604 	/* mutex initialization is all done here so we
3605 	 * can call functions later without locking issues */
3606 	rw_init(&adev->firmware.mutex, "agfw");
3607 	rw_init(&adev->pm.mutex, "agpm");
3608 	rw_init(&adev->gfx.gpu_clock_mutex, "gfxclk");
3609 	rw_init(&adev->srbm_mutex, "srbm");
3610 	rw_init(&adev->gfx.pipe_reserve_mutex, "pipers");
3611 	rw_init(&adev->gfx.gfx_off_mutex, "gfxoff");
3612 	rw_init(&adev->grbm_idx_mutex, "grbmidx");
3613 	rw_init(&adev->mn_lock, "agpumn");
3614 	rw_init(&adev->virt.vf_errors.lock, "vferr");
3615 	hash_init(adev->mn_hash);
3616 	rw_init(&adev->psp.mutex, "agpsp");
3617 	rw_init(&adev->notifier_lock, "agnf");
3618 	rw_init(&adev->pm.stable_pstate_ctx_lock, "agps");
3619 	rw_init(&adev->benchmark_mutex, "agbm");
3620 
3621 	amdgpu_device_init_apu_flags(adev);
3622 
3623 	r = amdgpu_device_check_arguments(adev);
3624 	if (r)
3625 		return r;
3626 
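	/* Spinlocks for the indirect register index/data pairs and the
	 * buffer migration statistics. */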
3627 	mtx_init(&adev->mmio_idx_lock, IPL_TTY);
3628 	mtx_init(&adev->smc_idx_lock, IPL_TTY);
3629 	mtx_init(&adev->pcie_idx_lock, IPL_TTY);
3630 	mtx_init(&adev->uvd_ctx_idx_lock, IPL_TTY);
3631 	mtx_init(&adev->didt_idx_lock, IPL_TTY);
3632 	mtx_init(&adev->gc_cac_idx_lock, IPL_TTY);
3633 	mtx_init(&adev->se_cac_idx_lock, IPL_TTY);
3634 	mtx_init(&adev->audio_endpt_idx_lock, IPL_TTY);
3635 	mtx_init(&adev->mm_stats.lock, IPL_NONE);
3636 
3637 	INIT_LIST_HEAD(&adev->shadow_list);
3638 	rw_init(&adev->shadow_list_lock, "sdwlst");
3639 
3640 	INIT_LIST_HEAD(&adev->reset_list);
3641 
3642 	INIT_LIST_HEAD(&adev->ras_list);
3643 
3644 	INIT_DELAYED_WORK(&adev->delayed_init_work,
3645 			  amdgpu_device_delayed_init_work_handler);
3646 	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3647 			  amdgpu_device_delay_enable_gfx_off);
3648 
3649 	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3650 
3651 	adev->gfx.gfx_off_req_count = 1;
3652 	adev->gfx.gfx_off_residency = 0;
3653 	adev->gfx.gfx_off_entrycount = 0;
3654 	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3655 
3656 	atomic_set(&adev->throttling_logging_enabled, 1);
3657 	/*
3658 	 * If throttling continues, logging will be performed every minute
3659 	 * to avoid log flooding. "-1" is subtracted since the thermal
3660 	 * throttling interrupt comes every second. Thus, the total logging
3661 	 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3662 	 * for the throttling interrupt) = 60 seconds.
3663 	 */
3664 	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3665 	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3666 
3667 #ifdef __linux__
3668 	/* Registers mapping */
3669 	/* TODO: block userspace mapping of io register */
3670 	if (adev->asic_type >= CHIP_BONAIRE) {
3671 		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3672 		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3673 	} else {
3674 		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3675 		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3676 	}
3677 
3678 	for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3679 		atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3680 
3681 	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3682 	if (adev->rmmio == NULL) {
3683 		return -ENOMEM;
3684 	}
3685 #endif
3686 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3687 	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3688 
3689 	amdgpu_device_get_pcie_info(adev);
3690 
3691 	if (amdgpu_mcbp)
3692 		DRM_INFO("MCBP is enabled\n");
3693 
3694 	/*
3695 	 * The reset domain needs to be present early, before the XGMI hive
3696 	 * (if any) is discovered and initialized, so the reset semaphore and
3697 	 * in-GPU-reset flag can be used during init and before the first RREG32.
3698 	 */
3699 	adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3700 	if (!adev->reset_domain)
3701 		return -ENOMEM;
3702 
3703 	/* detect hw virtualization here */
3704 	amdgpu_detect_virtualization(adev);
3705 
3706 	r = amdgpu_device_get_job_timeout_settings(adev);
3707 	if (r) {
3708 		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3709 		return r;
3710 	}
3711 
3712 	/* early init functions */
3713 	r = amdgpu_device_ip_early_init(adev);
3714 	if (r)
3715 		return r;
3716 
3717 	/* Enable TMZ based on IP_VERSION */
3718 	amdgpu_gmc_tmz_set(adev);
3719 
3720 	amdgpu_gmc_noretry_set(adev);
3721 	/* Need to get xgmi info early to decide the reset behavior */
3722 	if (adev->gmc.xgmi.supported) {
3723 		r = adev->gfxhub.funcs->get_xgmi_info(adev);
3724 		if (r)
3725 			return r;
3726 	}
3727 
3728 	/* enable PCIE atomic ops */
3729 #ifdef notyet
3730 	if (amdgpu_sriov_vf(adev))
3731 		adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3732 			adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3733 			(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3734 	else
3735 		adev->have_atomics_support =
3736 			!pci_enable_atomic_ops_to_root(adev->pdev,
3737 					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3738 					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3739 	if (!adev->have_atomics_support)
3740 		dev_info(adev->dev, "PCIE atomic ops is not supported\n");
3741 #else
3742 	adev->have_atomics_support = false;
3743 #endif
3744 
3745 	/* doorbell bar mapping and doorbell index init */
3746 	amdgpu_device_doorbell_init(adev);
3747 
3748 	if (amdgpu_emu_mode == 1) {
3749 		/* post the asic on emulation mode */
3750 		emu_soc_asic_init(adev);
3751 		goto fence_driver_init;
3752 	}
3753 
3754 	amdgpu_reset_init(adev);
3755 
3756 	/* detect if we are with an SRIOV vbios */
3757 	amdgpu_device_detect_sriov_bios(adev);
3758 
3759 	/* check if we need to reset the asic
3760 	 *  E.g., driver was not cleanly unloaded previously, etc.
3761 	 */
3762 	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3763 		if (adev->gmc.xgmi.num_physical_nodes) {
3764 			dev_info(adev->dev, "Pending hive reset.\n");
3765 			adev->gmc.xgmi.pending_reset = true;
3766 			/* Only need to init necessary block for SMU to handle the reset */
3767 			for (i = 0; i < adev->num_ip_blocks; i++) {
3768 				if (!adev->ip_blocks[i].status.valid)
3769 					continue;
3770 				if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3771 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3772 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3773 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3774 					DRM_DEBUG("IP %s disabled for hw_init.\n",
3775 						adev->ip_blocks[i].version->funcs->name);
3776 					adev->ip_blocks[i].status.hw = true;
3777 				}
3778 			}
3779 		} else {
3780 			r = amdgpu_asic_reset(adev);
3781 			if (r) {
3782 				dev_err(adev->dev, "asic reset on init failed\n");
3783 				goto failed;
3784 			}
3785 		}
3786 	}
3787 
3788 	pci_enable_pcie_error_reporting(adev->pdev);
3789 
3790 	/* Post card if necessary */
3791 	if (amdgpu_device_need_post(adev)) {
3792 		if (!adev->bios) {
3793 			dev_err(adev->dev, "no vBIOS found\n");
3794 			r = -EINVAL;
3795 			goto failed;
3796 		}
3797 		DRM_INFO("GPU posting now...\n");
3798 		r = amdgpu_device_asic_init(adev);
3799 		if (r) {
3800 			dev_err(adev->dev, "gpu post error!\n");
3801 			goto failed;
3802 		}
3803 	}
3804 
3805 	if (adev->is_atom_fw) {
3806 		/* Initialize clocks */
3807 		r = amdgpu_atomfirmware_get_clock_info(adev);
3808 		if (r) {
3809 			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3810 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3811 			goto failed;
3812 		}
3813 	} else {
3814 		/* Initialize clocks */
3815 		r = amdgpu_atombios_get_clock_info(adev);
3816 		if (r) {
3817 			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3818 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3819 			goto failed;
3820 		}
3821 		/* init i2c buses */
3822 		if (!amdgpu_device_has_dc_support(adev))
3823 			amdgpu_atombios_i2c_init(adev);
3824 	}
3825 
3826 fence_driver_init:
3827 	/* Fence driver */
3828 	r = amdgpu_fence_driver_sw_init(adev);
3829 	if (r) {
3830 		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3831 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3832 		goto failed;
3833 	}
3834 
3835 	/* init the mode config */
3836 	drm_mode_config_init(adev_to_drm(adev));
3837 
3838 	r = amdgpu_device_ip_init(adev);
3839 	if (r) {
3840 		/* failed in exclusive mode due to timeout */
3841 		if (amdgpu_sriov_vf(adev) &&
3842 		    !amdgpu_sriov_runtime(adev) &&
3843 		    amdgpu_virt_mmio_blocked(adev) &&
3844 		    !amdgpu_virt_wait_reset(adev)) {
3845 			dev_err(adev->dev, "VF exclusive mode timeout\n");
3846 			/* Don't send request since VF is inactive. */
3847 			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3848 			adev->virt.ops = NULL;
3849 			r = -EAGAIN;
3850 			goto release_ras_con;
3851 		}
3852 		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3853 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3854 		goto release_ras_con;
3855 	}
3856 
3857 	amdgpu_fence_driver_hw_init(adev);
3858 
3859 	dev_info(adev->dev,
3860 		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3861 			adev->gfx.config.max_shader_engines,
3862 			adev->gfx.config.max_sh_per_se,
3863 			adev->gfx.config.max_cu_per_sh,
3864 			adev->gfx.cu_info.number);
3865 
3866 #ifdef __OpenBSD__
3867 {
3868 	const char *chip_name;
3869 	uint32_t version = adev->ip_versions[GC_HWIP][0];
3870 	int maj, min, rev;
3871 
3872 	switch (adev->asic_type) {
3873 	case CHIP_RAVEN:
3874 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
3875 			chip_name = "RAVEN2";
3876 		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
3877 			chip_name = "PICASSO";
3878 		else
3879 			chip_name = "RAVEN";
3880 		break;
3881 	case CHIP_RENOIR:
3882 		if (adev->apu_flags & AMD_APU_IS_RENOIR)
3883 			chip_name = "RENOIR";
3884 		else
3885 			chip_name = "GREEN_SARDINE";
3886 		break;
3887 	default:
3888 		chip_name = amdgpu_asic_name[adev->asic_type];
3889 	}
3890 
3891 	printf("%s: %s", adev->self.dv_xname, chip_name);
3892 	/* show graphics/compute ip block version, not set on < GFX9 */
3893 	if (version) {
3894 		maj = IP_VERSION_MAJ(version);
3895 		min = IP_VERSION_MIN(version);
3896 		rev = IP_VERSION_REV(version);
3897 		printf(" GC %d.%d.%d", maj, min, rev);
3898 	}
3899 	printf(" %d CU rev 0x%02x\n", adev->gfx.cu_info.number, adev->rev_id);
3900 }
3901 #endif
3902 
3903 	adev->accel_working = true;
3904 
3905 	amdgpu_vm_check_compute_bug(adev);
3906 
3907 	/* Initialize the buffer migration limit. */
3908 	if (amdgpu_moverate >= 0)
3909 		max_MBps = amdgpu_moverate;
3910 	else
3911 		max_MBps = 8; /* Allow 8 MB/s. */
3912 	/* Get a log2 for easy divisions. */
3913 	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3914 
3915 	r = amdgpu_pm_sysfs_init(adev);
3916 	if (r) {
3917 		adev->pm_sysfs_en = false;
3918 		DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3919 	} else
3920 		adev->pm_sysfs_en = true;
3921 
3922 	r = amdgpu_ucode_sysfs_init(adev);
3923 	if (r) {
3924 		adev->ucode_sysfs_en = false;
3925 		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3926 	} else
3927 		adev->ucode_sysfs_en = true;
3928 
3929 	r = amdgpu_psp_sysfs_init(adev);
3930 	if (r) {
3931 		adev->psp_sysfs_en = false;
3932 		if (!amdgpu_sriov_vf(adev))
3933 			DRM_ERROR("Creating psp sysfs failed\n");
3934 	} else
3935 		adev->psp_sysfs_en = true;
3936 
3937 	/*
3938 	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3939 	 * Otherwise the mgpu fan boost feature will be skipped because
3940 	 * the gpu instance count is too low.
3941 	 */
3942 	amdgpu_register_gpu_instance(adev);
3943 
3944 	/* enable clockgating, etc. after ib tests, etc. since some blocks require
3945 	 * explicit gating rather than handling it automatically.
3946 	 */
3947 	if (!adev->gmc.xgmi.pending_reset) {
3948 		r = amdgpu_device_ip_late_init(adev);
3949 		if (r) {
3950 			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3951 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3952 			goto release_ras_con;
3953 		}
3954 		/* must succeed. */
3955 		amdgpu_ras_resume(adev);
3956 		queue_delayed_work(system_wq, &adev->delayed_init_work,
3957 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
3958 	}
3959 
3960 	if (amdgpu_sriov_vf(adev))
3961 		flush_delayed_work(&adev->delayed_init_work);
3962 
3963 	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3964 	if (r)
3965 		dev_err(adev->dev, "Could not create amdgpu device attr\n");
3966 
3967 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
3968 		r = amdgpu_pmu_init(adev);
3969 	if (r)
3970 		dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3971 
3972 	/* Keep the stored PCI config space at hand for restore after a sudden PCI error */
3973 	if (amdgpu_device_cache_pci_state(adev->pdev))
3974 		pci_restore_state(pdev);
3975 
3976 	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3977 	/* this will fail for cards that aren't VGA class devices, just
3978 	 * ignore it */
3979 #ifdef notyet
3980 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3981 		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3982 #endif
3983 
3984 	if (amdgpu_device_supports_px(ddev)) {
3985 		px = true;
3986 		vga_switcheroo_register_client(adev->pdev,
3987 					       &amdgpu_switcheroo_ops, px);
3988 		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3989 	}
3990 
3991 	if (adev->gmc.xgmi.pending_reset)
3992 		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3993 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
3994 
3995 	amdgpu_device_check_iommu_direct_map(adev);
3996 
3997 	return 0;
3998 
3999 release_ras_con:
4000 	amdgpu_release_ras_context(adev);
4001 
4002 failed:
4003 	amdgpu_vf_error_trans_all(adev);
4004 
4005 	return r;
4006 }
4007 
4008 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
4009 {
4010 	STUB();
4011 #ifdef notyet
4012 	/* Clear all CPU mappings pointing to this device */
4013 	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
4014 #endif
4015 
4016 	/* Unmap all mapped bars - Doorbell, registers and VRAM */
4017 	amdgpu_device_doorbell_fini(adev);
4018 
4019 #ifdef __linux__
4020 	iounmap(adev->rmmio);
4021 	adev->rmmio = NULL;
4022 	if (adev->mman.aper_base_kaddr)
4023 		iounmap(adev->mman.aper_base_kaddr);
4024 	adev->mman.aper_base_kaddr = NULL;
4025 #else
4026 	if (adev->rmmio_size > 0)
4027 		bus_space_unmap(adev->rmmio_bst, adev->rmmio_bsh,
4028 		    adev->rmmio_size);
4029 	adev->rmmio_size = 0;
4030 	adev->rmmio = NULL;
4031 	if (adev->mman.aper_base_kaddr)
4032 		bus_space_unmap(adev->memt, adev->mman.aper_bsh,
4033 		    adev->gmc.visible_vram_size);
4034 	adev->mman.aper_base_kaddr = NULL;
4035 #endif
4036 
4037 	/* Memory manager related */
4038 	if (!adev->gmc.xgmi.connected_to_cpu) {
4039 #ifdef __linux__
4040 		arch_phys_wc_del(adev->gmc.vram_mtrr);
4041 		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
4042 #else
4043 		drm_mtrr_del(0, adev->gmc.aper_base, adev->gmc.aper_size, DRM_MTRR_WC);
4044 #endif
4045 	}
4046 }
4047 
4048 /**
4049  * amdgpu_device_fini_hw - tear down the driver
4050  *
4051  * @adev: amdgpu_device pointer
4052  *
4053  * Tear down the driver info (all asics).
4054  * Called at driver shutdown.
4055  */
4056 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
4057 {
4058 	dev_info(adev->dev, "amdgpu: finishing device.\n");
4059 	flush_delayed_work(&adev->delayed_init_work);
4060 	adev->shutdown = true;
4061 
4062 	/* make sure IB tests have finished before entering exclusive mode
4063 	 * to avoid preemption during the IB tests
4064 	 */
4065 	if (amdgpu_sriov_vf(adev)) {
4066 		amdgpu_virt_request_full_gpu(adev, false);
4067 		amdgpu_virt_fini_data_exchange(adev);
4068 	}
4069 
4070 	/* disable all interrupts */
4071 	amdgpu_irq_disable_all(adev);
4072 	if (adev->mode_info.mode_config_initialized){
4073 		if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4074 			drm_helper_force_disable_all(adev_to_drm(adev));
4075 		else
4076 			drm_atomic_helper_shutdown(adev_to_drm(adev));
4077 	}
4078 	amdgpu_fence_driver_hw_fini(adev);
4079 
4080 	if (adev->mman.initialized) {
4081 		flush_delayed_work(&adev->mman.bdev.wq);
4082 		ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
4083 	}
4084 
4085 	if (adev->pm_sysfs_en)
4086 		amdgpu_pm_sysfs_fini(adev);
4087 	if (adev->ucode_sysfs_en)
4088 		amdgpu_ucode_sysfs_fini(adev);
4089 	if (adev->psp_sysfs_en)
4090 		amdgpu_psp_sysfs_fini(adev);
4091 	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
4092 
4093 	/* RAS features must be disabled before hw fini */
4094 	amdgpu_ras_pre_fini(adev);
4095 
4096 	amdgpu_device_ip_fini_early(adev);
4097 
4098 	amdgpu_irq_fini_hw(adev);
4099 
4100 	if (adev->mman.initialized)
4101 		ttm_device_clear_dma_mappings(&adev->mman.bdev);
4102 
4103 	amdgpu_gart_dummy_page_fini(adev);
4104 
4105 	amdgpu_device_unmap_mmio(adev);
4106 
4107 }
4108 
4109 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4110 {
4111 	int idx;
4112 
4113 	amdgpu_fence_driver_sw_fini(adev);
4114 	amdgpu_device_ip_fini(adev);
4115 	release_firmware(adev->firmware.gpu_info_fw);
4116 	adev->firmware.gpu_info_fw = NULL;
4117 	adev->accel_working = false;
4118 	dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4119 
4120 	amdgpu_reset_fini(adev);
4121 
4122 	/* free i2c buses */
4123 	if (!amdgpu_device_has_dc_support(adev))
4124 		amdgpu_i2c_fini(adev);
4125 
4126 	if (amdgpu_emu_mode != 1)
4127 		amdgpu_atombios_fini(adev);
4128 
4129 	kfree(adev->bios);
4130 	adev->bios = NULL;
4131 	if (amdgpu_device_supports_px(adev_to_drm(adev))) {
4132 		vga_switcheroo_unregister_client(adev->pdev);
4133 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
4134 	}
4135 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4136 		vga_client_unregister(adev->pdev);
4137 
4138 	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4139 #ifdef __linux__
4140 		iounmap(adev->rmmio);
4141 		adev->rmmio = NULL;
4142 #else
4143 		if (adev->rmmio_size > 0)
4144 			bus_space_unmap(adev->rmmio_bst, adev->rmmio_bsh,
4145 			    adev->rmmio_size);
4146 		adev->rmmio_size = 0;
4147 		adev->rmmio = NULL;
4148 #endif
4149 		amdgpu_device_doorbell_fini(adev);
4150 		drm_dev_exit(idx);
4151 	}
4152 
4153 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
4154 		amdgpu_pmu_fini(adev);
4155 	if (adev->mman.discovery_bin)
4156 		amdgpu_discovery_fini(adev);
4157 
4158 	amdgpu_reset_put_reset_domain(adev->reset_domain);
4159 	adev->reset_domain = NULL;
4160 
4161 	kfree(adev->pci_state);
4162 
4163 }
4164 
4165 /**
4166  * amdgpu_device_evict_resources - evict device resources
4167  * @adev: amdgpu device object
4168  *
4169  * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4170  * of the vram memory type. Mainly used for evicting device resources
4171  * at suspend time.
4172  *
4173  */
4174 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4175 {
4176 	int ret;
4177 
4178 	/* No need to evict vram on APUs for suspend to ram or s2idle */
4179 	if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4180 		return 0;
4181 
4182 	ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4183 	if (ret)
4184 		DRM_WARN("evicting device resources failed\n");
4185 	return ret;
4186 }
4187 
4188 /*
4189  * Suspend & resume.
4190  */
4191 /**
4192  * amdgpu_device_suspend - initiate device suspend
4193  *
4194  * @dev: drm dev pointer
4195  * @fbcon : notify the fbdev of suspend
4196  *
4197  * Puts the hw in the suspend state (all asics).
4198  * Returns 0 for success or an error on failure.
4199  * Called at driver suspend.
4200  */
4201 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4202 {
4203 	struct amdgpu_device *adev = drm_to_adev(dev);
4204 	int r = 0;
4205 
4206 	if (adev->shutdown)
4207 		return 0;
4208 
4209 #ifdef notyet
4210 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4211 		return 0;
4212 #endif
4213 
4214 	adev->in_suspend = true;
4215 
4216 	if (amdgpu_sriov_vf(adev)) {
4217 		amdgpu_virt_fini_data_exchange(adev);
4218 		r = amdgpu_virt_request_full_gpu(adev, false);
4219 		if (r)
4220 			return r;
4221 	}
4222 
4223 	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4224 		DRM_WARN("smart shift update failed\n");
4225 
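	/* Stop connector polling before the display hardware is suspended. */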
4226 	drm_kms_helper_poll_disable(dev);
4227 
4228 	if (fbcon)
4229 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4230 
4231 	cancel_delayed_work_sync(&adev->delayed_init_work);
4232 
4233 	amdgpu_ras_suspend(adev);
4234 
4235 	amdgpu_device_ip_suspend_phase1(adev);
4236 
4237 	if (!adev->in_s0ix)
4238 		amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4239 
4240 	r = amdgpu_device_evict_resources(adev);
4241 	if (r)
4242 		return r;
4243 
4244 	amdgpu_fence_driver_hw_fini(adev);
4245 
4246 	amdgpu_device_ip_suspend_phase2(adev);
4247 
4248 	if (amdgpu_sriov_vf(adev))
4249 		amdgpu_virt_release_full_gpu(adev, false);
4250 
4251 	return 0;
4252 }
4253 
4254 /**
4255  * amdgpu_device_resume - initiate device resume
4256  *
4257  * @dev: drm dev pointer
4258  * @fbcon : notify the fbdev of resume
4259  *
4260  * Bring the hw back to operating state (all asics).
4261  * Returns 0 for success or an error on failure.
4262  * Called at driver resume.
4263  */
4264 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4265 {
4266 	struct amdgpu_device *adev = drm_to_adev(dev);
4267 	int r = 0;
4268 
4269 	if (amdgpu_sriov_vf(adev)) {
4270 		r = amdgpu_virt_request_full_gpu(adev, true);
4271 		if (r)
4272 			return r;
4273 	}
4274 
4275 #ifdef notyet
4276 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4277 		return 0;
4278 #endif
4279 
4280 	if (adev->in_s0ix)
4281 		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4282 
4283 	/* post card */
4284 	if (amdgpu_device_need_post(adev)) {
4285 		r = amdgpu_device_asic_init(adev);
4286 		if (r)
4287 			dev_err(adev->dev, "amdgpu asic init failed\n");
4288 	}
4289 
4290 	r = amdgpu_device_ip_resume(adev);
4291 
4292 	/* no matter what r is, always need to properly release full GPU */
4293 	if (amdgpu_sriov_vf(adev)) {
4294 		amdgpu_virt_init_data_exchange(adev);
4295 		amdgpu_virt_release_full_gpu(adev, true);
4296 	}
4297 
4298 	if (r) {
4299 		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4300 		return r;
4301 	}
4302 	amdgpu_fence_driver_hw_init(adev);
4303 
4304 	r = amdgpu_device_ip_late_init(adev);
4305 	if (r)
4306 		return r;
4307 
4308 	queue_delayed_work(system_wq, &adev->delayed_init_work,
4309 			   msecs_to_jiffies(AMDGPU_RESUME_MS));
4310 
4311 	if (!adev->in_s0ix) {
4312 		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4313 		if (r)
4314 			return r;
4315 	}
4316 
4317 	/* Make sure IB tests flushed */
4318 	flush_delayed_work(&adev->delayed_init_work);
4319 
4320 	if (adev->in_s0ix) {
4321 		/* re-enable gfxoff after IP resume. This re-enables gfxoff after
4322 		 * it was disabled for IP resume in amdgpu_device_ip_resume_phase2().
4323 		 */
4324 		amdgpu_gfx_off_ctrl(adev, true);
4325 		DRM_DEBUG("will enable gfxoff for the mission mode\n");
4326 	}
4327 	if (fbcon)
4328 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4329 
4330 	drm_kms_helper_poll_enable(dev);
4331 
4332 	amdgpu_ras_resume(adev);
4333 
4334 	/*
4335 	 * Most of the connector probing functions try to acquire runtime pm
4336 	 * refs to ensure that the GPU is powered on when connector polling is
4337 	 * performed. Since we're calling this from a runtime PM callback,
4338 	 * trying to acquire rpm refs will cause us to deadlock.
4339 	 *
4340 	 * Since we're guaranteed to be holding the rpm lock, it's safe to
4341 	 * temporarily disable the rpm helpers so this doesn't deadlock us.
4342 	 */
4343 #if defined(CONFIG_PM) && defined(__linux__)
4344 	dev->dev->power.disable_depth++;
4345 #endif
4346 	if (!amdgpu_device_has_dc_support(adev))
4347 		drm_helper_hpd_irq_event(dev);
4348 	else
4349 		drm_kms_helper_hotplug_event(dev);
4350 #if defined(CONFIG_PM) && defined(__linux__)
4351 	dev->dev->power.disable_depth--;
4352 #endif
4353 	adev->in_suspend = false;
4354 
4355 	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4356 		DRM_WARN("smart shift update failed\n");
4357 
4358 	return 0;
4359 }
4360 
4361 /**
4362  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4363  *
4364  * @adev: amdgpu_device pointer
4365  *
4366  * The list of all the hardware IPs that make up the asic is walked and
4367  * the check_soft_reset callbacks are run.  check_soft_reset determines
4368  * if the asic is still hung or not.
4369  * Returns true if any of the IPs are still in a hung state, false if not.
4370  */
4371 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4372 {
4373 	int i;
4374 	bool asic_hang = false;
4375 
4376 	if (amdgpu_sriov_vf(adev))
4377 		return true;
4378 
4379 	if (amdgpu_asic_need_full_reset(adev))
4380 		return true;
4381 
4382 	for (i = 0; i < adev->num_ip_blocks; i++) {
4383 		if (!adev->ip_blocks[i].status.valid)
4384 			continue;
4385 		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4386 			adev->ip_blocks[i].status.hang =
4387 				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4388 		if (adev->ip_blocks[i].status.hang) {
4389 			dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4390 			asic_hang = true;
4391 		}
4392 	}
4393 	return asic_hang;
4394 }
4395 
4396 /**
4397  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4398  *
4399  * @adev: amdgpu_device pointer
4400  *
4401  * The list of all the hardware IPs that make up the asic is walked and the
4402  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4403  * handles any IP specific hardware or software state changes that are
4404  * necessary for a soft reset to succeed.
4405  * Returns 0 on success, negative error code on failure.
4406  */
4407 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4408 {
4409 	int i, r = 0;
4410 
4411 	for (i = 0; i < adev->num_ip_blocks; i++) {
4412 		if (!adev->ip_blocks[i].status.valid)
4413 			continue;
4414 		if (adev->ip_blocks[i].status.hang &&
4415 		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4416 			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4417 			if (r)
4418 				return r;
4419 		}
4420 	}
4421 
4422 	return 0;
4423 }
4424 
4425 /**
4426  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4427  *
4428  * @adev: amdgpu_device pointer
4429  *
4430  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4431  * reset is necessary to recover.
4432  * Returns true if a full asic reset is required, false if not.
4433  */
4434 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4435 {
4436 	int i;
4437 
4438 	if (amdgpu_asic_need_full_reset(adev))
4439 		return true;
4440 
4441 	for (i = 0; i < adev->num_ip_blocks; i++) {
4442 		if (!adev->ip_blocks[i].status.valid)
4443 			continue;
4444 		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4445 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4446 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4447 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4448 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4449 			if (adev->ip_blocks[i].status.hang) {
4450 				dev_info(adev->dev, "Some block need full reset!\n");
4451 				return true;
4452 			}
4453 		}
4454 	}
4455 	return false;
4456 }
4457 
4458 /**
4459  * amdgpu_device_ip_soft_reset - do a soft reset
4460  *
4461  * @adev: amdgpu_device pointer
4462  *
4463  * The list of all the hardware IPs that make up the asic is walked and the
4464  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4465  * IP specific hardware or software state changes that are necessary to soft
4466  * reset the IP.
4467  * Returns 0 on success, negative error code on failure.
4468  */
4469 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4470 {
4471 	int i, r = 0;
4472 
4473 	for (i = 0; i < adev->num_ip_blocks; i++) {
4474 		if (!adev->ip_blocks[i].status.valid)
4475 			continue;
4476 		if (adev->ip_blocks[i].status.hang &&
4477 		    adev->ip_blocks[i].version->funcs->soft_reset) {
4478 			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4479 			if (r)
4480 				return r;
4481 		}
4482 	}
4483 
4484 	return 0;
4485 }
4486 
4487 /**
4488  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4489  *
4490  * @adev: amdgpu_device pointer
4491  *
4492  * The list of all the hardware IPs that make up the asic is walked and the
4493  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4494  * handles any IP specific hardware or software state changes that are
4495  * necessary after the IP has been soft reset.
4496  * Returns 0 on success, negative error code on failure.
4497  */
4498 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4499 {
4500 	int i, r = 0;
4501 
4502 	for (i = 0; i < adev->num_ip_blocks; i++) {
4503 		if (!adev->ip_blocks[i].status.valid)
4504 			continue;
4505 		if (adev->ip_blocks[i].status.hang &&
4506 		    adev->ip_blocks[i].version->funcs->post_soft_reset)
4507 			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4508 		if (r)
4509 			return r;
4510 	}
4511 
4512 	return 0;
4513 }
4514 
4515 /**
4516  * amdgpu_device_recover_vram - Recover some VRAM contents
4517  *
4518  * @adev: amdgpu_device pointer
4519  *
4520  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4521  * restore things like GPUVM page tables after a GPU reset where
4522  * the contents of VRAM might be lost.
4523  *
4524  * Returns:
4525  * 0 on success, negative error code on failure.
4526  */
4527 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4528 {
4529 	struct dma_fence *fence = NULL, *next = NULL;
4530 	struct amdgpu_bo *shadow;
4531 	struct amdgpu_bo_vm *vmbo;
4532 	long r = 1, tmo;
4533 
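	/* A VF under SR-IOV runtime may temporarily lose GPU access, so allow
	 * a much longer wait for each shadow restore. */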
4534 	if (amdgpu_sriov_runtime(adev))
4535 		tmo = msecs_to_jiffies(8000);
4536 	else
4537 		tmo = msecs_to_jiffies(100);
4538 
4539 	dev_info(adev->dev, "recover vram bo from shadow start\n");
4540 	mutex_lock(&adev->shadow_list_lock);
4541 	list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4542 		shadow = &vmbo->bo;
4543 		/* No need to recover an evicted BO */
4544 		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4545 		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4546 		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4547 			continue;
4548 
4549 		r = amdgpu_bo_restore_shadow(shadow, &next);
4550 		if (r)
4551 			break;
4552 
4553 		if (fence) {
4554 			tmo = dma_fence_wait_timeout(fence, false, tmo);
4555 			dma_fence_put(fence);
4556 			fence = next;
4557 			if (tmo == 0) {
4558 				r = -ETIMEDOUT;
4559 				break;
4560 			} else if (tmo < 0) {
4561 				r = tmo;
4562 				break;
4563 			}
4564 		} else {
4565 			fence = next;
4566 		}
4567 	}
4568 	mutex_unlock(&adev->shadow_list_lock);
4569 
4570 	if (fence)
4571 		tmo = dma_fence_wait_timeout(fence, false, tmo);
4572 	dma_fence_put(fence);
4573 
4574 	if (r < 0 || tmo <= 0) {
4575 		dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4576 		return -EIO;
4577 	}
4578 
4579 	dev_info(adev->dev, "recover vram bo from shadow done\n");
4580 	return 0;
4581 }
4582 
4583 
4584 /**
4585  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4586  *
4587  * @adev: amdgpu_device pointer
4588  * @from_hypervisor: request from hypervisor
4589  *
4590  * Do a VF FLR and reinitialize the ASIC.
4591  * Returns 0 on success, negative error code on failure.
4592  */
4593 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4594 				     bool from_hypervisor)
4595 {
4596 	int r;
4597 	struct amdgpu_hive_info *hive = NULL;
4598 	int retry_limit = 0;
4599 
4600 retry:
4601 	amdgpu_amdkfd_pre_reset(adev);
4602 
4603 	if (from_hypervisor)
4604 		r = amdgpu_virt_request_full_gpu(adev, true);
4605 	else
4606 		r = amdgpu_virt_reset_gpu(adev);
4607 	if (r)
4608 		return r;
4609 
4610 	/* Resume IP prior to SMC */
4611 	r = amdgpu_device_ip_reinit_early_sriov(adev);
4612 	if (r)
4613 		goto error;
4614 
4615 	amdgpu_virt_init_data_exchange(adev);
4616 
4617 	r = amdgpu_device_fw_loading(adev);
4618 	if (r)
4619 		return r;
4620 
4621 	/* now we are okay to resume SMC/CP/SDMA */
4622 	r = amdgpu_device_ip_reinit_late_sriov(adev);
4623 	if (r)
4624 		goto error;
4625 
4626 	hive = amdgpu_get_xgmi_hive(adev);
4627 	/* Update PSP FW topology after reset */
4628 	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4629 		r = amdgpu_xgmi_update_topology(hive, adev);
4630 
4631 	if (hive)
4632 		amdgpu_put_xgmi_hive(hive);
4633 
4634 	if (!r) {
4635 		amdgpu_irq_gpu_reset_resume_helper(adev);
4636 		r = amdgpu_ib_ring_tests(adev);
4637 
4638 		amdgpu_amdkfd_post_reset(adev);
4639 	}
4640 
4641 error:
4642 	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4643 		amdgpu_inc_vram_lost(adev);
4644 		r = amdgpu_device_recover_vram(adev);
4645 	}
4646 	amdgpu_virt_release_full_gpu(adev, true);
4647 
4648 	if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4649 		if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4650 			retry_limit++;
4651 			goto retry;
4652 		} else
4653 			DRM_ERROR("GPU reset retry is beyond the retry limit\n");
4654 	}
4655 
4656 	return r;
4657 }
4658 
4659 /**
4660  * amdgpu_device_has_job_running - check if there is any job in mirror list
4661  *
4662  * @adev: amdgpu_device pointer
4663  *
4664  * check if there is any job in mirror list
4665  */
4666 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4667 {
4668 	int i;
4669 	struct drm_sched_job *job;
4670 
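	/* Walk every ring's pending list; any entry means a job is still outstanding. */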
4671 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4672 		struct amdgpu_ring *ring = adev->rings[i];
4673 
4674 		if (!ring || !ring->sched.thread)
4675 			continue;
4676 
4677 		spin_lock(&ring->sched.job_list_lock);
4678 		job = list_first_entry_or_null(&ring->sched.pending_list,
4679 					       struct drm_sched_job, list);
4680 		spin_unlock(&ring->sched.job_list_lock);
4681 		if (job)
4682 			return true;
4683 	}
4684 	return false;
4685 }
4686 
4687 /**
4688  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4689  *
4690  * @adev: amdgpu_device pointer
4691  *
4692  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4693  * a hung GPU.
4694  */
4695 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4696 {
4697 
4698 	if (amdgpu_gpu_recovery == 0)
4699 		goto disabled;
4700 
4701 	if (!amdgpu_device_ip_check_soft_reset(adev)) {
4702 		dev_info(adev->dev,"Timeout, but no hardware hang detected.\n");
4703 		return false;
4704 	}
4705 
4706 	if (amdgpu_sriov_vf(adev))
4707 		return true;
4708 
4709 	if (amdgpu_gpu_recovery == -1) {
4710 		switch (adev->asic_type) {
4711 #ifdef CONFIG_DRM_AMDGPU_SI
4712 		case CHIP_VERDE:
4713 		case CHIP_TAHITI:
4714 		case CHIP_PITCAIRN:
4715 		case CHIP_OLAND:
4716 		case CHIP_HAINAN:
4717 #endif
4718 #ifdef CONFIG_DRM_AMDGPU_CIK
4719 		case CHIP_KAVERI:
4720 		case CHIP_KABINI:
4721 		case CHIP_MULLINS:
4722 #endif
4723 		case CHIP_CARRIZO:
4724 		case CHIP_STONEY:
4725 		case CHIP_CYAN_SKILLFISH:
4726 			goto disabled;
4727 		default:
4728 			break;
4729 		}
4730 	}
4731 
4732 	return true;
4733 
4734 disabled:
4735 		dev_info(adev->dev, "GPU recovery disabled.\n");
4736 		return false;
4737 }
4738 
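/*
 * Perform a mode1 (whole ASIC) reset: use the SMU path when DPM reports it is
 * supported, otherwise fall back to the PSP path, then poll until the ASIC
 * reports a valid memory size again.
 */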
4739 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4740 {
4741 	u32 i;
4742 	int ret = 0;
4743 
4744 	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4745 
4746 	dev_info(adev->dev, "GPU mode1 reset\n");
4747 
4748 	/* disable BM */
4749 	pci_clear_master(adev->pdev);
4750 
4751 	amdgpu_device_cache_pci_state(adev->pdev);
4752 
4753 	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4754 		dev_info(adev->dev, "GPU smu mode1 reset\n");
4755 		ret = amdgpu_dpm_mode1_reset(adev);
4756 	} else {
4757 		dev_info(adev->dev, "GPU psp mode1 reset\n");
4758 		ret = psp_gpu_reset(adev);
4759 	}
4760 
4761 	if (ret)
4762 		dev_err(adev->dev, "GPU mode1 reset failed\n");
4763 
4764 	amdgpu_device_load_pci_state(adev->pdev);
4765 
4766 	/* wait for asic to come out of reset */
4767 	for (i = 0; i < adev->usec_timeout; i++) {
4768 		u32 memsize = adev->nbio.funcs->get_memsize(adev);
4769 
4770 		if (memsize != 0xffffffff)
4771 			break;
4772 		udelay(1);
4773 	}
4774 
4775 	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4776 	return ret;
4777 }
4778 
4779 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4780 				 struct amdgpu_reset_context *reset_context)
4781 {
4782 	int i, r = 0;
4783 	struct amdgpu_job *job = NULL;
4784 	bool need_full_reset =
4785 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4786 
4787 	if (reset_context->reset_req_dev == adev)
4788 		job = reset_context->job;
4789 
4790 	if (amdgpu_sriov_vf(adev)) {
4791 		/* stop the data exchange thread */
4792 		amdgpu_virt_fini_data_exchange(adev);
4793 	}
4794 
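	/* Keep the fence interrupt path from racing with the job-fence cleanup below. */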
4795 	amdgpu_fence_driver_isr_toggle(adev, true);
4796 
4797 	/* block all schedulers and reset given job's ring */
4798 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4799 		struct amdgpu_ring *ring = adev->rings[i];
4800 
4801 		if (!ring || !ring->sched.thread)
4802 			continue;
4803 
4804 		/* clear job fences from the fence drv to avoid force_completion;
4805 		 * leave NULL and vm flush fences in the fence drv */
4806 		amdgpu_fence_driver_clear_job_fences(ring);
4807 
4808 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4809 		amdgpu_fence_driver_force_completion(ring);
4810 	}
4811 
4812 	amdgpu_fence_driver_isr_toggle(adev, false);
4813 
4814 	if (job && job->vm)
4815 		drm_sched_increase_karma(&job->base);
4816 
4817 	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4818 	/* If reset handler not implemented, continue; otherwise return */
4819 	if (r == -ENOSYS)
4820 		r = 0;
4821 	else
4822 		return r;
4823 
4824 	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4825 	if (!amdgpu_sriov_vf(adev)) {
4826 
4827 		if (!need_full_reset)
4828 			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4829 
4830 		if (!need_full_reset && amdgpu_gpu_recovery) {
4831 			amdgpu_device_ip_pre_soft_reset(adev);
4832 			r = amdgpu_device_ip_soft_reset(adev);
4833 			amdgpu_device_ip_post_soft_reset(adev);
4834 			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4835 				dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4836 				need_full_reset = true;
4837 			}
4838 		}
4839 
4840 		if (need_full_reset)
4841 			r = amdgpu_device_ip_suspend(adev);
4842 		if (need_full_reset)
4843 			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4844 		else
4845 			clear_bit(AMDGPU_NEED_FULL_RESET,
4846 				  &reset_context->flags);
4847 	}
4848 
4849 	return r;
4850 }
4851 
4852 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4853 {
4854 	int i;
4855 
4856 	lockdep_assert_held(&adev->reset_domain->sem);
4857 
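	/* Snapshot the registers selected for dumping so they can be reported
	 * in the devcoredump and the reset trace. */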
4858 	for (i = 0; i < adev->num_regs; i++) {
4859 		adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4860 		trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
4861 					     adev->reset_dump_reg_value[i]);
4862 	}
4863 
4864 	return 0;
4865 }
4866 
4867 #ifdef CONFIG_DEV_COREDUMP
4868 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
4869 		size_t count, void *data, size_t datalen)
4870 {
4871 	struct drm_printer p;
4872 	struct amdgpu_device *adev = data;
4873 	struct drm_print_iterator iter;
4874 	int i;
4875 
4876 	iter.data = buffer;
4877 	iter.offset = 0;
4878 	iter.start = offset;
4879 	iter.remain = count;
4880 
4881 	p = drm_coredump_printer(&iter);
4882 
4883 	drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
4884 	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
4885 	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
4886 	drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
4887 	if (adev->reset_task_info.pid)
4888 		drm_printf(&p, "process_name: %s PID: %d\n",
4889 			   adev->reset_task_info.process_name,
4890 			   adev->reset_task_info.pid);
4891 
4892 	if (adev->reset_vram_lost)
4893 		drm_printf(&p, "VRAM is lost due to GPU reset!\n");
4894 	if (adev->num_regs) {
4895 		drm_printf(&p, "AMDGPU register dumps:\nOffset:     Value:\n");
4896 
4897 		for (i = 0; i < adev->num_regs; i++)
4898 			drm_printf(&p, "0x%08x: 0x%08x\n",
4899 				   adev->reset_dump_reg_list[i],
4900 				   adev->reset_dump_reg_value[i]);
4901 	}
4902 
4903 	return count - iter.remain;
4904 }
4905 
4906 static void amdgpu_devcoredump_free(void *data)
4907 {
4908 }
4909 
4910 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
4911 {
4912 	struct drm_device *dev = adev_to_drm(adev);
4913 
4914 	ktime_get_ts64(&adev->reset_time);
4915 	dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
4916 		      amdgpu_devcoredump_read, amdgpu_devcoredump_free);
4917 }
4918 #endif
4919 
4920 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4921 			 struct amdgpu_reset_context *reset_context)
4922 {
4923 	struct amdgpu_device *tmp_adev = NULL;
4924 	bool need_full_reset, skip_hw_reset, vram_lost = false;
4925 	int r = 0;
4926 	bool gpu_reset_for_dev_remove = 0;
4927 
4928 	/* Try reset handler method first */
4929 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4930 				    reset_list);
4931 	amdgpu_reset_reg_dumps(tmp_adev);
4932 
4933 	reset_context->reset_device_list = device_list_handle;
4934 	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4935 	/* If reset handler not implemented, continue; otherwise return */
4936 	if (r == -ENOSYS)
4937 		r = 0;
4938 	else
4939 		return r;
4940 
4941 	/* Reset handler not implemented, use the default method */
4942 	need_full_reset =
4943 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4944 	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4945 
4946 	gpu_reset_for_dev_remove =
4947 		test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
4948 			test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4949 
4950 	/*
4951 	 * ASIC reset has to be done on all XGMI hive nodes ASAP
4952 	 * to allow proper link negotiation in FW (within 1 sec)
4953 	 */
4954 	if (!skip_hw_reset && need_full_reset) {
4955 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4956 			/* For XGMI run all resets in parallel to speed up the process */
4957 			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4958 				tmp_adev->gmc.xgmi.pending_reset = false;
4959 				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4960 					r = -EALREADY;
4961 			} else
4962 				r = amdgpu_asic_reset(tmp_adev);
4963 
4964 			if (r) {
4965 				dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4966 					 r, adev_to_drm(tmp_adev)->unique);
4967 				break;
4968 			}
4969 		}
4970 
4971 		/* For XGMI wait for all resets to complete before proceed */
4972 		if (!r) {
4973 			list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4974 				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4975 					flush_work(&tmp_adev->xgmi_reset_work);
4976 					r = tmp_adev->asic_reset_res;
4977 					if (r)
4978 						break;
4979 				}
4980 			}
4981 		}
4982 	}
4983 
4984 	if (!r && amdgpu_ras_intr_triggered()) {
4985 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4986 			if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
4987 			    tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
4988 				tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
4989 		}
4990 
4991 		amdgpu_ras_intr_cleared();
4992 	}
4993 
4994 	/* Since the mode1 reset affects base ip blocks, the
4995 	 * phase1 ip blocks need to be resumed. Otherwise there
4996 	 * will be a BIOS signature error and the psp bootloader
4997 	 * can't load kdb on the next amdgpu install.
4998 	 */
4999 	if (gpu_reset_for_dev_remove) {
5000 		list_for_each_entry(tmp_adev, device_list_handle, reset_list)
5001 			amdgpu_device_ip_resume_phase1(tmp_adev);
5002 
5003 		goto end;
5004 	}
5005 
5006 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5007 		if (need_full_reset) {
5008 			/* post card */
5009 			r = amdgpu_device_asic_init(tmp_adev);
5010 			if (r) {
5011 				dev_warn(tmp_adev->dev, "asic atom init failed!");
5012 			} else {
5013 				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
5014 				r = amdgpu_amdkfd_resume_iommu(tmp_adev);
5015 				if (r)
5016 					goto out;
5017 
5018 				r = amdgpu_device_ip_resume_phase1(tmp_adev);
5019 				if (r)
5020 					goto out;
5021 
5022 				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
5023 #ifdef CONFIG_DEV_COREDUMP
5024 				tmp_adev->reset_vram_lost = vram_lost;
5025 				memset(&tmp_adev->reset_task_info, 0,
5026 						sizeof(tmp_adev->reset_task_info));
5027 				if (reset_context->job && reset_context->job->vm)
5028 					tmp_adev->reset_task_info =
5029 						reset_context->job->vm->task_info;
5030 				amdgpu_reset_capture_coredumpm(tmp_adev);
5031 #endif
5032 				if (vram_lost) {
5033 					DRM_INFO("VRAM is lost due to GPU reset!\n");
5034 					amdgpu_inc_vram_lost(tmp_adev);
5035 				}
5036 
5037 				r = amdgpu_device_fw_loading(tmp_adev);
5038 				if (r)
5039 					return r;
5040 
5041 				r = amdgpu_device_ip_resume_phase2(tmp_adev);
5042 				if (r)
5043 					goto out;
5044 
5045 				if (vram_lost)
5046 					amdgpu_device_fill_reset_magic(tmp_adev);
5047 
5048 				/*
5049 				 * Add this ASIC back as tracked now that the
5050 				 * reset has completed successfully.
5051 				 */
5052 				amdgpu_register_gpu_instance(tmp_adev);
5053 
5054 				if (!reset_context->hive &&
5055 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5056 					amdgpu_xgmi_add_device(tmp_adev);
5057 
5058 				r = amdgpu_device_ip_late_init(tmp_adev);
5059 				if (r)
5060 					goto out;
5061 
5062 				drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
5063 
5064 				/*
5065 				 * The GPU enters a bad state once the number of
5066 				 * faulty pages reported by ECC reaches the
5067 				 * threshold, and RAS recovery is scheduled next.
5068 				 * So check here whether the bad page threshold
5069 				 * has indeed been exceeded and, if so, abort
5070 				 * recovery and remind the user to either retire
5071 				 * this GPU or raise bad_page_threshold before
5072 				 * probing the driver again.
5073 				 */
5074 				if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
5075 					/* must succeed. */
5076 					amdgpu_ras_resume(tmp_adev);
5077 				} else {
5078 					r = -EINVAL;
5079 					goto out;
5080 				}
5081 
5082 				/* Update PSP FW topology after reset */
5083 				if (reset_context->hive &&
5084 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5085 					r = amdgpu_xgmi_update_topology(
5086 						reset_context->hive, tmp_adev);
5087 			}
5088 		}
5089 
5090 out:
5091 		if (!r) {
5092 			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5093 			r = amdgpu_ib_ring_tests(tmp_adev);
5094 			if (r) {
5095 				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
5096 				need_full_reset = true;
5097 				r = -EAGAIN;
5098 				goto end;
5099 			}
5100 		}
5101 
5102 		if (!r)
5103 			r = amdgpu_device_recover_vram(tmp_adev);
5104 		else
5105 			tmp_adev->asic_reset_res = r;
5106 	}
5107 
5108 end:
5109 	if (need_full_reset)
5110 		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5111 	else
5112 		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5113 	return r;
5114 }
5115 
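/* Pick the MP1 (SMU) state that matches the reset method about to be used. */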
5116 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5117 {
5118 
5119 	switch (amdgpu_asic_reset_method(adev)) {
5120 	case AMD_RESET_METHOD_MODE1:
5121 		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5122 		break;
5123 	case AMD_RESET_METHOD_MODE2:
5124 		adev->mp1_state = PP_MP1_STATE_RESET;
5125 		break;
5126 	default:
5127 		adev->mp1_state = PP_MP1_STATE_NONE;
5128 		break;
5129 	}
5132 }
5133 
5134 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5135 {
5136 	amdgpu_vf_error_trans_all(adev);
5137 	adev->mp1_state = PP_MP1_STATE_NONE;
5138 }
5139 
5140 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5141 {
5142 	STUB();
5143 #ifdef notyet
5144 	struct pci_dev *p = NULL;
5145 
5146 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5147 			adev->pdev->bus->number, 1);
5148 	if (p) {
5149 		pm_runtime_enable(&(p->dev));
5150 		pm_runtime_resume(&(p->dev));
5151 	}

	/* drop the reference taken by pci_get_domain_bus_and_slot() */
	pci_dev_put(p);
5152 #endif
5153 }
5154 
5155 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5156 {
5157 	enum amd_reset_method reset_method;
5158 	struct pci_dev *p = NULL;
5159 	u64 expires;
5160 
5161 	/*
5162 	 * For now, only BACO and mode1 reset are confirmed to suffer
5163 	 * the audio issue if the audio device is not properly suspended.
5164 	 */
5165 	reset_method = amdgpu_asic_reset_method(adev);
5166 	if ((reset_method != AMD_RESET_METHOD_BACO) &&
5167 	     (reset_method != AMD_RESET_METHOD_MODE1))
5168 		return -EINVAL;
5169 
5170 	STUB();
5171 	return -ENOSYS;
5172 #ifdef notyet
5173 
5174 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5175 			adev->pdev->bus->number, 1);
5176 	if (!p)
5177 		return -ENODEV;
5178 
5179 	expires = pm_runtime_autosuspend_expiration(&(p->dev));
5180 	if (!expires)
5181 		/*
5182 		 * If we cannot get the audio device autosuspend delay,
5183 		 * a fixed 4s interval will be used. Since 3s is the audio
5184 		 * controller's default autosuspend delay, the 4s used here
5185 		 * is guaranteed to cover it.
5186 		 */
5187 		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5188 
5189 	while (!pm_runtime_status_suspended(&(p->dev))) {
5190 		if (!pm_runtime_suspend(&(p->dev)))
5191 			break;
5192 
5193 		if (expires < ktime_get_mono_fast_ns()) {
5194 			dev_warn(adev->dev, "failed to suspend display audio\n");
5195 			pci_dev_put(p);
5196 			/* TODO: abort the succeeding gpu reset? */
5197 			return -ETIMEDOUT;
5198 		}
5199 	}
5200 
5201 	pm_runtime_disable(&(p->dev));
5202 
5203 	pci_dev_put(p);
5204 	return 0;
5205 #endif
5206 }
5207 
5208 static void amdgpu_device_recheck_guilty_jobs(
5209 	struct amdgpu_device *adev, struct list_head *device_list_handle,
5210 	struct amdgpu_reset_context *reset_context)
5211 {
5212 	int i, r = 0;
5213 
5214 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5215 		struct amdgpu_ring *ring = adev->rings[i];
5216 		int ret = 0;
5217 		struct drm_sched_job *s_job;
5218 
5219 		if (!ring || !ring->sched.thread)
5220 			continue;
5221 
5222 		s_job = list_first_entry_or_null(&ring->sched.pending_list,
5223 				struct drm_sched_job, list);
5224 		if (s_job == NULL)
5225 			continue;
5226 
5227 		/* clear the job's guilty flag and rely on the following steps to decide the real offender */
5228 		drm_sched_reset_karma(s_job);
5229 		drm_sched_resubmit_jobs_ext(&ring->sched, 1);
5230 
5231 		if (!s_job->s_fence->parent) {
5232 			DRM_WARN("Failed to get a HW fence for job!");
5233 			continue;
5234 		}
5235 
5236 		ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
5237 		if (ret == 0) { /* timeout */
5238 			DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
5239 						ring->sched.name, s_job->id);
5240 
5241 
5242 			amdgpu_fence_driver_isr_toggle(adev, true);
5243 
5244 			/* Clear this failed job from fence array */
5245 			amdgpu_fence_driver_clear_job_fences(ring);
5246 
5247 			amdgpu_fence_driver_isr_toggle(adev, false);
5248 
5249 			/* Since the job won't signal and we are going to
5250 			 * resubmit again, drop this parent pointer
5251 			 */
5252 			dma_fence_put(s_job->s_fence->parent);
5253 			s_job->s_fence->parent = NULL;
5254 
5255 			/* set guilty */
5256 			drm_sched_increase_karma(s_job);
5257 			amdgpu_reset_prepare_hwcontext(adev, reset_context);
5258 retry:
5259 			/* do hw reset */
5260 			if (amdgpu_sriov_vf(adev)) {
5261 				amdgpu_virt_fini_data_exchange(adev);
5262 				r = amdgpu_device_reset_sriov(adev, false);
5263 				if (r)
5264 					adev->asic_reset_res = r;
5265 			} else {
5266 				clear_bit(AMDGPU_SKIP_HW_RESET,
5267 					  &reset_context->flags);
5268 				r = amdgpu_do_asic_reset(device_list_handle,
5269 							 reset_context);
5270 				if (r && r == -EAGAIN)
5271 					goto retry;
5272 			}
5273 
5274 			/*
5275 			 * bump the reset counter so that the following
5276 			 * resubmitted job can flush its VMID
5277 			 */
5278 			atomic_inc(&adev->gpu_reset_counter);
5279 			continue;
5280 		}
5281 
5282 		/* got the hw fence, signal finished fence */
5283 		atomic_dec(ring->sched.score);
5284 		dma_fence_get(&s_job->s_fence->finished);
5285 		dma_fence_signal(&s_job->s_fence->finished);
5286 		dma_fence_put(&s_job->s_fence->finished);
5287 
5288 		/* remove node from list and free the job */
5289 		spin_lock(&ring->sched.job_list_lock);
5290 		list_del_init(&s_job->list);
5291 		spin_unlock(&ring->sched.job_list_lock);
5292 		ring->sched.ops->free_job(s_job);
5293 	}
5294 }
5295 
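/* Cancel any queued reset work so it cannot race with the reset already in progress. */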
5296 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5297 {
5298 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5299 
5300 #if defined(CONFIG_DEBUG_FS)
5301 	if (!amdgpu_sriov_vf(adev))
5302 		cancel_work(&adev->reset_work);
5303 #endif
5304 
5305 	if (adev->kfd.dev)
5306 		cancel_work(&adev->kfd.reset_work);
5307 
5308 	if (amdgpu_sriov_vf(adev))
5309 		cancel_work(&adev->virt.flr_work);
5310 
5311 	if (con && adev->ras_enabled)
5312 		cancel_work(&con->recovery_work);
5313 
5314 }
5315 
5316 
5317 /**
5318  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5319  *
5320  * @adev: amdgpu_device pointer
5321  * @job: which job triggered the hang
5322  *
5323  * Attempt to reset the GPU if it has hung (all asics).
5324  * Attempt to do soft-reset or full-reset and reinitialize Asic
5325  * Returns 0 for success or an error on failure.
5326  */
5327 
5328 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5329 			      struct amdgpu_job *job,
5330 			      struct amdgpu_reset_context *reset_context)
5331 {
5332 	struct list_head device_list, *device_list_handle =  NULL;
5333 	bool job_signaled = false;
5334 	struct amdgpu_hive_info *hive = NULL;
5335 	struct amdgpu_device *tmp_adev = NULL;
5336 	int i, r = 0;
5337 	bool need_emergency_restart = false;
5338 	bool audio_suspended = false;
5339 	int tmp_vram_lost_counter;
5340 	bool gpu_reset_for_dev_remove = false;
5341 
5342 	gpu_reset_for_dev_remove =
5343 			test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5344 				test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5345 
5346 	/*
5347 	 * Special case: RAS triggered and full reset isn't supported
5348 	 */
5349 	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5350 
	/*
	 * Flush RAM to disk so that after reboot
	 * the user can read the log and see why the system rebooted.
	 */
5355 	if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5356 		DRM_WARN("Emergency reboot.");
5357 
5358 #ifdef notyet
5359 		ksys_sync_helper();
5360 		emergency_restart();
5361 #else
5362 		panic("emergency_restart");
5363 #endif
5364 	}
5365 
5366 	dev_info(adev->dev, "GPU %s begin!\n",
5367 		need_emergency_restart ? "jobs stop":"reset");
5368 
5369 	if (!amdgpu_sriov_vf(adev))
5370 		hive = amdgpu_get_xgmi_hive(adev);
5371 	if (hive)
5372 		mutex_lock(&hive->hive_lock);
5373 
5374 	reset_context->job = job;
5375 	reset_context->hive = hive;
5376 	/*
5377 	 * Build list of devices to reset.
5378 	 * In case we are in XGMI hive mode, resort the device list
5379 	 * to put adev in the 1st position.
5380 	 */
5381 	INIT_LIST_HEAD(&device_list);
5382 	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5383 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5384 			list_add_tail(&tmp_adev->reset_list, &device_list);
5385 			if (gpu_reset_for_dev_remove && adev->shutdown)
5386 				tmp_adev->shutdown = true;
5387 		}
5388 		if (!list_is_first(&adev->reset_list, &device_list))
5389 			list_rotate_to_front(&adev->reset_list, &device_list);
5390 		device_list_handle = &device_list;
5391 	} else {
5392 		list_add_tail(&adev->reset_list, &device_list);
5393 		device_list_handle = &device_list;
5394 	}
5395 
	/* We need to lock the reset domain only once, both for XGMI and single device */
5397 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5398 				    reset_list);
5399 	amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5400 
5401 	/* block all schedulers and reset given job's ring */
5402 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5403 
5404 		amdgpu_device_set_mp1_state(tmp_adev);
5405 
		/*
		 * Try to put the audio codec into a suspended state
		 * before the GPU reset starts.
		 *
		 * The graphics device shares its power domain with the
		 * AZ (audio) power domain. Without this we may change
		 * the audio hardware from behind the audio driver's
		 * back, which triggers audio codec errors.
		 */
5416 		if (!amdgpu_device_suspend_display_audio(tmp_adev))
5417 			audio_suspended = true;
5418 
5419 		amdgpu_ras_set_error_query_ready(tmp_adev, false);
5420 
5421 		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5422 
5423 		if (!amdgpu_sriov_vf(tmp_adev))
5424 			amdgpu_amdkfd_pre_reset(tmp_adev);
5425 
		/*
		 * Mark the ASICs to be reset as untracked first,
		 * and add them back after the reset completes.
		 */
5430 		amdgpu_unregister_gpu_instance(tmp_adev);
5431 
5432 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5433 
5434 		/* disable ras on ALL IPs */
5435 		if (!need_emergency_restart &&
5436 		      amdgpu_device_ip_need_full_reset(tmp_adev))
5437 			amdgpu_ras_suspend(tmp_adev);
5438 
5439 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5440 			struct amdgpu_ring *ring = tmp_adev->rings[i];
5441 
5442 			if (!ring || !ring->sched.thread)
5443 				continue;
5444 
5445 			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5446 
5447 			if (need_emergency_restart)
5448 				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5449 		}
5450 		atomic_inc(&tmp_adev->gpu_reset_counter);
5451 	}
5452 
5453 	if (need_emergency_restart)
5454 		goto skip_sched_resume;
5455 
	/*
	 * Must check whether the guilty job has already signaled here,
	 * since after this point all old HW fences are force signaled.
	 *
	 * job->base holds a reference to the parent fence.
	 */
5462 	if (job && dma_fence_is_signaled(&job->hw_fence)) {
5463 		job_signaled = true;
5464 		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5465 		goto skip_hw_reset;
5466 	}
5467 
5468 retry:	/* Rest of adevs pre asic reset from XGMI hive. */
5469 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5470 		if (gpu_reset_for_dev_remove) {
			/* Workaround for ASICs that need to disable the SMC first */
5472 			amdgpu_device_smu_fini_early(tmp_adev);
5473 		}
5474 		r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
		/* TODO: should we stop? */
5476 		if (r) {
5477 			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5478 				  r, adev_to_drm(tmp_adev)->unique);
5479 			tmp_adev->asic_reset_res = r;
5480 		}
5481 
		/*
		 * Drop all pending non-scheduler resets. Scheduler resets
		 * were already dropped during drm_sched_stop.
		 */
5486 		amdgpu_device_stop_pending_resets(tmp_adev);
5487 	}
5488 
5489 	tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
	/* Actual ASIC resets if needed. */
5491 	/* Host driver will handle XGMI hive reset for SRIOV */
5492 	if (amdgpu_sriov_vf(adev)) {
5493 		r = amdgpu_device_reset_sriov(adev, job ? false : true);
5494 		if (r)
5495 			adev->asic_reset_res = r;
5496 
		/* Aldebaran supports RAS in SR-IOV, so we need to resume RAS during reset */
5498 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
5499 			amdgpu_ras_resume(adev);
5500 	} else {
5501 		r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5502 		if (r && r == -EAGAIN)
5503 			goto retry;
5504 
5505 		if (!r && gpu_reset_for_dev_remove)
5506 			goto recover_end;
5507 	}
5508 
5509 skip_hw_reset:
5510 
	/* Post ASIC reset for all devs. */
5512 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5513 
		/*
		 * Sometimes a later bad compute job can block a good gfx job because
		 * the gfx and compute rings share internal GC hardware. Add an extra
		 * guilty-job recheck step to find the real guilty job: it synchronously
		 * resubmits each job and waits for it to signal. If the wait times out,
		 * that job is identified as the real guilty job.
		 */
5521 		if (amdgpu_gpu_recovery == 2 &&
5522 			!(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
5523 			amdgpu_device_recheck_guilty_jobs(
5524 				tmp_adev, device_list_handle, reset_context);
5525 
5526 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5527 			struct amdgpu_ring *ring = tmp_adev->rings[i];
5528 
5529 			if (!ring || !ring->sched.thread)
5530 				continue;
5531 
			/* No point in resubmitting jobs if we didn't do a HW reset */
5533 			if (!tmp_adev->asic_reset_res && !job_signaled)
5534 				drm_sched_resubmit_jobs(&ring->sched);
5535 
5536 			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5537 		}
5538 
5539 		if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
5540 			amdgpu_mes_self_test(tmp_adev);
5541 
5542 		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
5543 			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5544 		}
5545 
5546 		if (tmp_adev->asic_reset_res)
5547 			r = tmp_adev->asic_reset_res;
5548 
5549 		tmp_adev->asic_reset_res = 0;
5550 
5551 		if (r) {
			/* bad news, how do we tell this to userspace? */
5553 			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5554 			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5555 		} else {
5556 			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5557 			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5558 				DRM_WARN("smart shift update failed\n");
5559 		}
5560 	}
5561 
5562 skip_sched_resume:
5563 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5564 		/* unlock kfd: SRIOV would do it separately */
5565 		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5566 			amdgpu_amdkfd_post_reset(tmp_adev);
5567 
		/* kfd_post_reset will do nothing if the kfd device is not initialized;
		 * bring up kfd here if it hasn't been initialized before.
		 */
5571 		if (!adev->kfd.init_complete)
5572 			amdgpu_amdkfd_device_init(adev);
5573 
5574 		if (audio_suspended)
5575 			amdgpu_device_resume_display_audio(tmp_adev);
5576 
5577 		amdgpu_device_unset_mp1_state(tmp_adev);
5578 	}
5579 
5580 recover_end:
5581 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5582 					    reset_list);
5583 	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5584 
5585 	if (hive) {
5586 		mutex_unlock(&hive->hive_lock);
5587 		amdgpu_put_xgmi_hive(hive);
5588 	}
5589 
5590 	if (r)
5591 		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5592 
5593 	atomic_set(&adev->reset_domain->reset_res, r);
5594 	return r;
5595 }
5596 
/**
 * amdgpu_device_get_pcie_info - fetch PCIE info about the PCIE slot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches and stores in the driver the PCIE capabilities (gen speed
 * and lanes) of the slot the device is in. Handles APUs and
 * virtualized environments where PCIE config space may not be available.
 */
5606 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5607 {
5608 	struct pci_dev *pdev;
5609 	enum pci_bus_speed speed_cap, platform_speed_cap;
5610 	enum pcie_link_width platform_link_width;
5611 
5612 	if (amdgpu_pcie_gen_cap)
5613 		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5614 
5615 	if (amdgpu_pcie_lane_cap)
5616 		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5617 
5618 	/* covers APUs as well */
5619 	if (pci_is_root_bus(adev->pdev->bus)) {
5620 		if (adev->pm.pcie_gen_mask == 0)
5621 			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5622 		if (adev->pm.pcie_mlw_mask == 0)
5623 			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5624 		return;
5625 	}
5626 
5627 	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5628 		return;
5629 
5630 	pcie_bandwidth_available(adev->pdev, NULL,
5631 				 &platform_speed_cap, &platform_link_width);
5632 
5633 	if (adev->pm.pcie_gen_mask == 0) {
5634 		/* asic caps */
5635 		pdev = adev->pdev;
5636 		speed_cap = pcie_get_speed_cap(pdev);
5637 		if (speed_cap == PCI_SPEED_UNKNOWN) {
5638 			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5639 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5640 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5641 		} else {
5642 			if (speed_cap == PCIE_SPEED_32_0GT)
5643 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5644 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5645 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5646 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5647 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5648 			else if (speed_cap == PCIE_SPEED_16_0GT)
5649 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5650 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5651 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5652 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5653 			else if (speed_cap == PCIE_SPEED_8_0GT)
5654 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5655 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5656 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5657 			else if (speed_cap == PCIE_SPEED_5_0GT)
5658 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5659 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5660 			else
5661 				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5662 		}
5663 		/* platform caps */
5664 		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5665 			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5666 						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5667 		} else {
5668 			if (platform_speed_cap == PCIE_SPEED_32_0GT)
5669 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5670 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5671 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5672 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5673 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5674 			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5675 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5676 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5677 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5678 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5679 			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5680 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5681 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5682 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5683 			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5684 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5685 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5686 			else
5687 				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5688 
5689 		}
5690 	}
5691 	if (adev->pm.pcie_mlw_mask == 0) {
5692 		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5693 			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5694 		} else {
5695 			switch (platform_link_width) {
5696 			case PCIE_LNK_X32:
5697 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5698 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5699 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5700 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5701 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5702 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5703 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5704 				break;
5705 			case PCIE_LNK_X16:
5706 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5707 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5708 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5709 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5710 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5711 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5712 				break;
5713 			case PCIE_LNK_X12:
5714 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5715 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5716 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5717 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5718 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5719 				break;
5720 			case PCIE_LNK_X8:
5721 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5722 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5723 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5724 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5725 				break;
5726 			case PCIE_LNK_X4:
5727 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5728 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5729 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5730 				break;
5731 			case PCIE_LNK_X2:
5732 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5733 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5734 				break;
5735 			case PCIE_LNK_X1:
5736 				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5737 				break;
5738 			default:
5739 				break;
5740 			}
5741 		}
5742 	}
5743 }
5744 
5745 /**
5746  * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5747  *
5748  * @adev: amdgpu_device pointer
5749  * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5750  *
5751  * Return true if @peer_adev can access (DMA) @adev through the PCIe
5752  * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5753  * @peer_adev.
5754  */
5755 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5756 				      struct amdgpu_device *peer_adev)
5757 {
5758 #ifdef CONFIG_HSA_AMD_P2P
5759 	uint64_t address_mask = peer_adev->dev->dma_mask ?
5760 		~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5761 	resource_size_t aper_limit =
5762 		adev->gmc.aper_base + adev->gmc.aper_size - 1;
5763 	bool p2p_access =
5764 		!adev->gmc.xgmi.connected_to_cpu &&
5765 		!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5766 
5767 	return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5768 		adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5769 		!(adev->gmc.aper_base & address_mask ||
5770 		  aper_limit & address_mask));
5771 #else
5772 	return false;
5773 #endif
5774 }
5775 
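/**
 * amdgpu_device_baco_enter - enter BACO (Bus Active, Chip Off)
 *
 * @dev: drm_device pointer
 *
 * Disables the doorbell interrupt when RAS is enabled, then asks the DPM
 * layer to put the device into the BACO state.
 * Returns 0 on success, -ENOTSUPP if the device does not support BACO,
 * or another negative error code on failure.
 */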
5776 int amdgpu_device_baco_enter(struct drm_device *dev)
5777 {
5778 	struct amdgpu_device *adev = drm_to_adev(dev);
5779 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5780 
5781 	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5782 		return -ENOTSUPP;
5783 
5784 	if (ras && adev->ras_enabled &&
5785 	    adev->nbio.funcs->enable_doorbell_interrupt)
5786 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5787 
5788 	return amdgpu_dpm_baco_enter(adev);
5789 }
5790 
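/**
 * amdgpu_device_baco_exit - exit BACO (Bus Active, Chip Off)
 *
 * @dev: drm_device pointer
 *
 * Asks the DPM layer to bring the device out of BACO, re-enables the
 * doorbell interrupt if RAS is enabled, and clears the doorbell interrupt
 * when running in passthrough mode.
 * Returns 0 on success, -ENOTSUPP if the device does not support BACO,
 * or another negative error code on failure.
 */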
5791 int amdgpu_device_baco_exit(struct drm_device *dev)
5792 {
5793 	struct amdgpu_device *adev = drm_to_adev(dev);
5794 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5795 	int ret = 0;
5796 
5797 	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5798 		return -ENOTSUPP;
5799 
5800 	ret = amdgpu_dpm_baco_exit(adev);
5801 	if (ret)
5802 		return ret;
5803 
5804 	if (ras && adev->ras_enabled &&
5805 	    adev->nbio.funcs->enable_doorbell_interrupt)
5806 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5807 
5808 	if (amdgpu_passthrough(adev) &&
5809 	    adev->nbio.funcs->clear_doorbell_interrupt)
5810 		adev->nbio.funcs->clear_doorbell_interrupt(adev);
5811 
5812 	return 0;
5813 }
5814 
5815 /**
5816  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5817  * @pdev: PCI device struct
5818  * @state: PCI channel state
5819  *
5820  * Description: Called when a PCI error is detected.
5821  *
5822  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5823  */
5824 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5825 {
5826 	STUB();
5827 	return 0;
5828 #ifdef notyet
5829 	struct drm_device *dev = pci_get_drvdata(pdev);
5830 	struct amdgpu_device *adev = drm_to_adev(dev);
5831 	int i;
5832 
5833 	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5834 
5835 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
5836 		DRM_WARN("No support for XGMI hive yet...");
5837 		return PCI_ERS_RESULT_DISCONNECT;
5838 	}
5839 
5840 	adev->pci_channel_state = state;
5841 
5842 	switch (state) {
5843 	case pci_channel_io_normal:
5844 		return PCI_ERS_RESULT_CAN_RECOVER;
5845 	/* Fatal error, prepare for slot reset */
5846 	case pci_channel_io_frozen:
5847 		/*
5848 		 * Locking adev->reset_domain->sem will prevent any external access
5849 		 * to GPU during PCI error recovery
5850 		 */
5851 		amdgpu_device_lock_reset_domain(adev->reset_domain);
5852 		amdgpu_device_set_mp1_state(adev);
5853 
5854 		/*
5855 		 * Block any work scheduling as we do for regular GPU reset
5856 		 * for the duration of the recovery
5857 		 */
5858 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5859 			struct amdgpu_ring *ring = adev->rings[i];
5860 
5861 			if (!ring || !ring->sched.thread)
5862 				continue;
5863 
5864 			drm_sched_stop(&ring->sched, NULL);
5865 		}
5866 		atomic_inc(&adev->gpu_reset_counter);
5867 		return PCI_ERS_RESULT_NEED_RESET;
5868 	case pci_channel_io_perm_failure:
5869 		/* Permanent error, prepare for device removal */
5870 		return PCI_ERS_RESULT_DISCONNECT;
5871 	}
5872 
5873 	return PCI_ERS_RESULT_NEED_RESET;
5874 #endif
5875 }
5876 
5877 /**
5878  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5879  * @pdev: pointer to PCI device
5880  */
5881 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5882 {
5883 
5884 	DRM_INFO("PCI error: mmio enabled callback!!\n");
5885 
5886 	/* TODO - dump whatever for debugging purposes */
5887 
	/* This is called only if amdgpu_pci_error_detected returns
	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
	 * works, so there is no need to reset the slot.
	 */
5892 
5893 	return PCI_ERS_RESULT_RECOVERED;
5894 }
5895 
5896 /**
5897  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5898  * @pdev: PCI device struct
5899  *
5900  * Description: This routine is called by the pci error recovery
5901  * code after the PCI slot has been reset, just before we
5902  * should resume normal operations.
5903  */
5904 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5905 {
5906 	STUB();
5907 	return PCI_ERS_RESULT_RECOVERED;
5908 #ifdef notyet
5909 	struct drm_device *dev = pci_get_drvdata(pdev);
5910 	struct amdgpu_device *adev = drm_to_adev(dev);
5911 	int r, i;
5912 	struct amdgpu_reset_context reset_context;
5913 	u32 memsize;
5914 	struct list_head device_list;
5915 
5916 	DRM_INFO("PCI error: slot reset callback!!\n");
5917 
5918 	memset(&reset_context, 0, sizeof(reset_context));
5919 
5920 	INIT_LIST_HEAD(&device_list);
5921 	list_add_tail(&adev->reset_list, &device_list);
5922 
5923 	/* wait for asic to come out of reset */
5924 	drm_msleep(500);
5925 
5926 	/* Restore PCI confspace */
5927 	amdgpu_device_load_pci_state(pdev);
5928 
	/* confirm ASIC came out of reset */
5930 	for (i = 0; i < adev->usec_timeout; i++) {
5931 		memsize = amdgpu_asic_get_config_memsize(adev);
5932 
5933 		if (memsize != 0xffffffff)
5934 			break;
5935 		udelay(1);
5936 	}
5937 	if (memsize == 0xffffffff) {
5938 		r = -ETIME;
5939 		goto out;
5940 	}
5941 
5942 	reset_context.method = AMD_RESET_METHOD_NONE;
5943 	reset_context.reset_req_dev = adev;
5944 	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5945 	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5946 
5947 	adev->no_hw_access = true;
5948 	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5949 	adev->no_hw_access = false;
5950 	if (r)
5951 		goto out;
5952 
5953 	r = amdgpu_do_asic_reset(&device_list, &reset_context);
5954 
5955 out:
5956 	if (!r) {
5957 		if (amdgpu_device_cache_pci_state(adev->pdev))
5958 			pci_restore_state(adev->pdev);
5959 
5960 		DRM_INFO("PCIe error recovery succeeded\n");
5961 	} else {
5962 		DRM_ERROR("PCIe error recovery failed, err:%d", r);
5963 		amdgpu_device_unset_mp1_state(adev);
5964 		amdgpu_device_unlock_reset_domain(adev->reset_domain);
5965 	}
5966 
5967 	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5968 #endif
5969 }
5970 
5971 /**
5972  * amdgpu_pci_resume() - resume normal ops after PCI reset
5973  * @pdev: pointer to PCI device
5974  *
 * Called when the error recovery driver tells us that it's
 * OK to resume normal operation.
5977  */
5978 void amdgpu_pci_resume(struct pci_dev *pdev)
5979 {
5980 	STUB();
5981 #ifdef notyet
5982 	struct drm_device *dev = pci_get_drvdata(pdev);
5983 	struct amdgpu_device *adev = drm_to_adev(dev);
5984 	int i;
5985 
5986 
5987 	DRM_INFO("PCI error: resume callback!!\n");
5988 
5989 	/* Only continue execution for the case of pci_channel_io_frozen */
5990 	if (adev->pci_channel_state != pci_channel_io_frozen)
5991 		return;
5992 
5993 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5994 		struct amdgpu_ring *ring = adev->rings[i];
5995 
5996 		if (!ring || !ring->sched.thread)
5997 			continue;
5998 
5999 
6000 		drm_sched_resubmit_jobs(&ring->sched);
6001 		drm_sched_start(&ring->sched, true);
6002 	}
6003 
6004 	amdgpu_device_unset_mp1_state(adev);
6005 	amdgpu_device_unlock_reset_domain(adev->reset_domain);
6006 #endif
6007 }
6008 
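/**
 * amdgpu_device_cache_pci_state - save the PCI config space of the device
 *
 * @pdev: PCI device struct
 *
 * Saves the PCI config space so it can be restored after a GPU reset or
 * PCI error recovery. Not implemented in this port: it always returns
 * false, with the Linux implementation kept under "#ifdef notyet".
 */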
6009 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
6010 {
6011 	return false;
6012 #ifdef notyet
6013 	struct drm_device *dev = pci_get_drvdata(pdev);
6014 	struct amdgpu_device *adev = drm_to_adev(dev);
6015 	int r;
6016 
6017 	r = pci_save_state(pdev);
6018 	if (!r) {
6019 		kfree(adev->pci_state);
6020 
6021 		adev->pci_state = pci_store_saved_state(pdev);
6022 
6023 		if (!adev->pci_state) {
6024 			DRM_ERROR("Failed to store PCI saved state");
6025 			return false;
6026 		}
6027 	} else {
6028 		DRM_WARN("Failed to save PCI state, err:%d\n", r);
6029 		return false;
6030 	}
6031 
6032 	return true;
6033 #endif
6034 }
6035 
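/**
 * amdgpu_device_load_pci_state - restore the cached PCI config space
 *
 * @pdev: PCI device struct
 *
 * Restores the PCI config space previously saved by
 * amdgpu_device_cache_pci_state(). Not implemented in this port: it is
 * stubbed out and always returns false, with the Linux implementation
 * kept under "#ifdef notyet".
 */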
6036 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
6037 {
6038 	STUB();
6039 	return false;
6040 #ifdef notyet
6041 	struct drm_device *dev = pci_get_drvdata(pdev);
6042 	struct amdgpu_device *adev = drm_to_adev(dev);
6043 	int r;
6044 
6045 	if (!adev->pci_state)
6046 		return false;
6047 
6048 	r = pci_load_saved_state(pdev, adev->pci_state);
6049 
6050 	if (!r) {
6051 		pci_restore_state(pdev);
6052 	} else {
6053 		DRM_WARN("Failed to load PCI state, err:%d\n", r);
6054 		return false;
6055 	}
6056 
6057 	return true;
6058 #endif
6059 }
6060 
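/**
 * amdgpu_device_flush_hdp - flush the HDP (Host Data Path) cache
 *
 * @adev: amdgpu_device pointer
 * @ring: ring to emit the flush on, may be NULL
 *
 * Skipped on x86-64 APUs that are not in passthrough mode and on GPUs
 * connected to the CPU over XGMI. Otherwise the flush is either emitted
 * as a packet on @ring, if the ring supports it, or performed through
 * the ASIC-specific register callback.
 */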
6061 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
6062 		struct amdgpu_ring *ring)
6063 {
6064 #ifdef CONFIG_X86_64
6065 	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6066 		return;
6067 #endif
6068 	if (adev->gmc.xgmi.connected_to_cpu)
6069 		return;
6070 
6071 	if (ring && ring->funcs->emit_hdp_flush)
6072 		amdgpu_ring_emit_hdp_flush(ring);
6073 	else
6074 		amdgpu_asic_flush_hdp(adev, ring);
6075 }
6076 
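/**
 * amdgpu_device_invalidate_hdp - invalidate the HDP (Host Data Path) cache
 *
 * @adev: amdgpu_device pointer
 * @ring: ring the invalidation is requested for, may be NULL
 *
 * Skipped under the same conditions as amdgpu_device_flush_hdp();
 * otherwise the ASIC-specific callback invalidates the HDP cache.
 */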
6077 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
6078 		struct amdgpu_ring *ring)
6079 {
6080 #ifdef CONFIG_X86_64
6081 	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6082 		return;
6083 #endif
6084 	if (adev->gmc.xgmi.connected_to_cpu)
6085 		return;
6086 
6087 	amdgpu_asic_invalidate_hdp(adev, ring);
6088 }
6089 
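/**
 * amdgpu_in_reset - check whether the device is currently in GPU reset
 *
 * @adev: amdgpu_device pointer
 *
 * Returns non-zero while the reset domain that @adev belongs to is
 * performing a GPU reset, 0 otherwise.
 */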
6090 int amdgpu_in_reset(struct amdgpu_device *adev)
6091 {
6092 	return atomic_read(&adev->reset_domain->in_gpu_reset);
}
6094 
6095 /**
6096  * amdgpu_device_halt() - bring hardware to some kind of halt state
6097  *
6098  * @adev: amdgpu_device pointer
6099  *
 * Bring hardware to some kind of halt state so that no one can touch it
 * any more. This helps to preserve the error context when an error occurs.
 * Compared to a simple hang, the system stays stable at least for SSH
 * access, so it should be trivial to inspect the hardware state and
 * see what's going on. Implemented as follows:
 *
 * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc),
 *    clears all CPU mappings to the device and disallows remappings through page faults
 * 2. amdgpu_irq_disable_all() disables all interrupts
 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
 * 4. set adev->no_hw_access to avoid potential crashes after step 5
 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
 *    flush any in-flight DMA operations
6114  */
6115 void amdgpu_device_halt(struct amdgpu_device *adev)
6116 {
6117 	struct pci_dev *pdev = adev->pdev;
6118 	struct drm_device *ddev = adev_to_drm(adev);
6119 
6120 	drm_dev_unplug(ddev);
6121 
6122 	amdgpu_irq_disable_all(adev);
6123 
6124 	amdgpu_fence_driver_hw_fini(adev);
6125 
6126 	adev->no_hw_access = true;
6127 
6128 	amdgpu_device_unmap_mmio(adev);
6129 
6130 	pci_disable_device(pdev);
6131 	pci_wait_for_pending_transaction(pdev);
6132 }
6133 
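/**
 * amdgpu_device_pcie_port_rreg - read a PCIE port register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword offset of the register to read
 *
 * Reads an indirect PCIE port register through the NBIO-provided
 * index/data register pair, serialized by the pcie_idx_lock spinlock.
 * Returns the value of the register.
 */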
6134 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
6135 				u32 reg)
6136 {
6137 	unsigned long flags, address, data;
6138 	u32 r;
6139 
6140 	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6141 	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6142 
6143 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6144 	WREG32(address, reg * 4);
6145 	(void)RREG32(address);
6146 	r = RREG32(data);
6147 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6148 	return r;
6149 }
6150 
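/**
 * amdgpu_device_pcie_port_wreg - write a PCIE port register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword offset of the register to write
 * @v: value to write
 *
 * Writes an indirect PCIE port register through the NBIO-provided
 * index/data register pair, serialized by the pcie_idx_lock spinlock.
 * The data register is read back so the write posts before the lock
 * is dropped.
 */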
6151 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
6152 				u32 reg, u32 v)
6153 {
6154 	unsigned long flags, address, data;
6155 
6156 	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6157 	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6158 
6159 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6160 	WREG32(address, reg * 4);
6161 	(void)RREG32(address);
6162 	WREG32(data, v);
6163 	(void)RREG32(data);
6164 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6165 }
6166 
6167 /**
6168  * amdgpu_device_switch_gang - switch to a new gang
6169  * @adev: amdgpu_device pointer
6170  * @gang: the gang to switch to
6171  *
6172  * Try to switch to a new gang.
6173  * Returns: NULL if we switched to the new gang or a reference to the current
6174  * gang leader.
6175  */
6176 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6177 					    struct dma_fence *gang)
6178 {
6179 	struct dma_fence *old = NULL;
6180 
6181 	do {
6182 		dma_fence_put(old);
6183 		rcu_read_lock();
6184 		old = dma_fence_get_rcu_safe(&adev->gang_submit);
6185 		rcu_read_unlock();
6186 
6187 		if (old == gang)
6188 			break;
6189 
6190 		if (!dma_fence_is_signaled(old))
6191 			return old;
6192 
6193 	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
6194 			 old, gang) != old);
6195 
6196 	dma_fence_put(old);
6197 	return NULL;
6198 }
6199 
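/**
 * amdgpu_device_has_display_hardware - check whether the ASIC has display IP
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true if the ASIC has usable display hardware, false otherwise.
 * Older ASICs are handled with an explicit list; newer ones are decided
 * from the IP discovery DCE version and the DMU harvest mask.
 */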
6200 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
6201 {
6202 	switch (adev->asic_type) {
6203 #ifdef CONFIG_DRM_AMDGPU_SI
6204 	case CHIP_HAINAN:
6205 #endif
6206 	case CHIP_TOPAZ:
6207 		/* chips with no display hardware */
6208 		return false;
6209 #ifdef CONFIG_DRM_AMDGPU_SI
6210 	case CHIP_TAHITI:
6211 	case CHIP_PITCAIRN:
6212 	case CHIP_VERDE:
6213 	case CHIP_OLAND:
6214 #endif
6215 #ifdef CONFIG_DRM_AMDGPU_CIK
6216 	case CHIP_BONAIRE:
6217 	case CHIP_HAWAII:
6218 	case CHIP_KAVERI:
6219 	case CHIP_KABINI:
6220 	case CHIP_MULLINS:
6221 #endif
6222 	case CHIP_TONGA:
6223 	case CHIP_FIJI:
6224 	case CHIP_POLARIS10:
6225 	case CHIP_POLARIS11:
6226 	case CHIP_POLARIS12:
6227 	case CHIP_VEGAM:
6228 	case CHIP_CARRIZO:
6229 	case CHIP_STONEY:
6230 		/* chips with display hardware */
6231 		return true;
6232 	default:
6233 		/* IP discovery */
6234 		if (!adev->ip_versions[DCE_HWIP][0] ||
6235 		    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
6236 			return false;
6237 		return true;
6238 	}
6239 }
6240