xref: /dflybsd-src/sys/dev/drm/amd/amdgpu/gmc_v9_0.c (revision ef7b4fc3bb9842035aa6c33660f77dca8bf8fe6c)
1b843c749SSergey Zigachev /*
2b843c749SSergey Zigachev  * Copyright 2016 Advanced Micro Devices, Inc.
3b843c749SSergey Zigachev  *
4b843c749SSergey Zigachev  * Permission is hereby granted, free of charge, to any person obtaining a
5b843c749SSergey Zigachev  * copy of this software and associated documentation files (the "Software"),
6b843c749SSergey Zigachev  * to deal in the Software without restriction, including without limitation
7b843c749SSergey Zigachev  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8b843c749SSergey Zigachev  * and/or sell copies of the Software, and to permit persons to whom the
9b843c749SSergey Zigachev  * Software is furnished to do so, subject to the following conditions:
10b843c749SSergey Zigachev  *
11b843c749SSergey Zigachev  * The above copyright notice and this permission notice shall be included in
12b843c749SSergey Zigachev  * all copies or substantial portions of the Software.
13b843c749SSergey Zigachev  *
14b843c749SSergey Zigachev  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15b843c749SSergey Zigachev  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16b843c749SSergey Zigachev  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17b843c749SSergey Zigachev  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18b843c749SSergey Zigachev  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19b843c749SSergey Zigachev  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20b843c749SSergey Zigachev  * OTHER DEALINGS IN THE SOFTWARE.
21b843c749SSergey Zigachev  *
22b843c749SSergey Zigachev  */
23b843c749SSergey Zigachev #include <linux/firmware.h>
24b843c749SSergey Zigachev #include <drm/drm_cache.h>
25b843c749SSergey Zigachev #include "amdgpu.h"
26b843c749SSergey Zigachev #include "gmc_v9_0.h"
27b843c749SSergey Zigachev #include "amdgpu_atomfirmware.h"
28b843c749SSergey Zigachev 
29b843c749SSergey Zigachev #include "hdp/hdp_4_0_offset.h"
30b843c749SSergey Zigachev #include "hdp/hdp_4_0_sh_mask.h"
31b843c749SSergey Zigachev #include "gc/gc_9_0_sh_mask.h"
32b843c749SSergey Zigachev #include "dce/dce_12_0_offset.h"
33b843c749SSergey Zigachev #include "dce/dce_12_0_sh_mask.h"
34b843c749SSergey Zigachev #include "vega10_enum.h"
35b843c749SSergey Zigachev #include "mmhub/mmhub_1_0_offset.h"
36b843c749SSergey Zigachev #include "athub/athub_1_0_offset.h"
37b843c749SSergey Zigachev #include "oss/osssys_4_0_offset.h"
38b843c749SSergey Zigachev 
39b843c749SSergey Zigachev #include "soc15.h"
40b843c749SSergey Zigachev #include "soc15_common.h"
41b843c749SSergey Zigachev #include "umc/umc_6_0_sh_mask.h"
42b843c749SSergey Zigachev 
43b843c749SSergey Zigachev #include "gfxhub_v1_0.h"
44b843c749SSergey Zigachev #include "mmhub_v1_0.h"
45b843c749SSergey Zigachev 
46b843c749SSergey Zigachev #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
47b843c749SSergey Zigachev 
48b843c749SSergey Zigachev /* add these here since we already include dce12 headers and these are for DCN */
49b843c749SSergey Zigachev #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
50b843c749SSergey Zigachev #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
51b843c749SSergey Zigachev #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
52b843c749SSergey Zigachev #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
53b843c749SSergey Zigachev #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
54b843c749SSergey Zigachev #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L
55b843c749SSergey Zigachev 
56b843c749SSergey Zigachev /* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
57b843c749SSergey Zigachev #define AMDGPU_NUM_OF_VMIDS			8
58b843c749SSergey Zigachev 
/*
 * HDP golden register settings for Vega10.
 * NOTE(review): rows look like { register offset, mask, value } triples,
 * but this table is not referenced in this chunk — confirm the layout
 * against the helper that programs it.
 */
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};
72b843c749SSergey Zigachev 
/* MMHUB 1.0.0 golden register settings (instance, masked value per register). */
static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};
78b843c749SSergey Zigachev 
/* ATHUB 1.0.0 golden register settings (RPB arbiter control tuning). */
static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
84b843c749SSergey Zigachev 
85b843c749SSergey Zigachev /* Ecc related register addresses, (BASE + reg offset) */
86b843c749SSergey Zigachev /* Universal Memory Controller caps (may be fused). */
87b843c749SSergey Zigachev /* UMCCH:UmcLocalCap */
88b843c749SSergey Zigachev #define UMCLOCALCAPS_ADDR0	(0x00014306 + 0x00000000)
89b843c749SSergey Zigachev #define UMCLOCALCAPS_ADDR1	(0x00014306 + 0x00000800)
90b843c749SSergey Zigachev #define UMCLOCALCAPS_ADDR2	(0x00014306 + 0x00001000)
91b843c749SSergey Zigachev #define UMCLOCALCAPS_ADDR3	(0x00014306 + 0x00001800)
92b843c749SSergey Zigachev #define UMCLOCALCAPS_ADDR4	(0x00054306 + 0x00000000)
93b843c749SSergey Zigachev #define UMCLOCALCAPS_ADDR5	(0x00054306 + 0x00000800)
94b843c749SSergey Zigachev #define UMCLOCALCAPS_ADDR6	(0x00054306 + 0x00001000)
95b843c749SSergey Zigachev #define UMCLOCALCAPS_ADDR7	(0x00054306 + 0x00001800)
96b843c749SSergey Zigachev #define UMCLOCALCAPS_ADDR8	(0x00094306 + 0x00000000)
97b843c749SSergey Zigachev #define UMCLOCALCAPS_ADDR9	(0x00094306 + 0x00000800)
98b843c749SSergey Zigachev #define UMCLOCALCAPS_ADDR10	(0x00094306 + 0x00001000)
99b843c749SSergey Zigachev #define UMCLOCALCAPS_ADDR11	(0x00094306 + 0x00001800)
100b843c749SSergey Zigachev #define UMCLOCALCAPS_ADDR12	(0x000d4306 + 0x00000000)
101b843c749SSergey Zigachev #define UMCLOCALCAPS_ADDR13	(0x000d4306 + 0x00000800)
102b843c749SSergey Zigachev #define UMCLOCALCAPS_ADDR14	(0x000d4306 + 0x00001000)
103b843c749SSergey Zigachev #define UMCLOCALCAPS_ADDR15	(0x000d4306 + 0x00001800)
104b843c749SSergey Zigachev 
105b843c749SSergey Zigachev /* Universal Memory Controller Channel config. */
106b843c749SSergey Zigachev /* UMCCH:UMC_CONFIG */
107b843c749SSergey Zigachev #define UMCCH_UMC_CONFIG_ADDR0	(0x00014040 + 0x00000000)
108b843c749SSergey Zigachev #define UMCCH_UMC_CONFIG_ADDR1	(0x00014040 + 0x00000800)
109b843c749SSergey Zigachev #define UMCCH_UMC_CONFIG_ADDR2	(0x00014040 + 0x00001000)
110b843c749SSergey Zigachev #define UMCCH_UMC_CONFIG_ADDR3	(0x00014040 + 0x00001800)
111b843c749SSergey Zigachev #define UMCCH_UMC_CONFIG_ADDR4	(0x00054040 + 0x00000000)
112b843c749SSergey Zigachev #define UMCCH_UMC_CONFIG_ADDR5	(0x00054040 + 0x00000800)
113b843c749SSergey Zigachev #define UMCCH_UMC_CONFIG_ADDR6	(0x00054040 + 0x00001000)
114b843c749SSergey Zigachev #define UMCCH_UMC_CONFIG_ADDR7	(0x00054040 + 0x00001800)
115b843c749SSergey Zigachev #define UMCCH_UMC_CONFIG_ADDR8	(0x00094040 + 0x00000000)
116b843c749SSergey Zigachev #define UMCCH_UMC_CONFIG_ADDR9	(0x00094040 + 0x00000800)
117b843c749SSergey Zigachev #define UMCCH_UMC_CONFIG_ADDR10	(0x00094040 + 0x00001000)
118b843c749SSergey Zigachev #define UMCCH_UMC_CONFIG_ADDR11	(0x00094040 + 0x00001800)
119b843c749SSergey Zigachev #define UMCCH_UMC_CONFIG_ADDR12	(0x000d4040 + 0x00000000)
120b843c749SSergey Zigachev #define UMCCH_UMC_CONFIG_ADDR13	(0x000d4040 + 0x00000800)
121b843c749SSergey Zigachev #define UMCCH_UMC_CONFIG_ADDR14	(0x000d4040 + 0x00001000)
122b843c749SSergey Zigachev #define UMCCH_UMC_CONFIG_ADDR15	(0x000d4040 + 0x00001800)
123b843c749SSergey Zigachev 
124b843c749SSergey Zigachev /* Universal Memory Controller Channel Ecc config. */
125b843c749SSergey Zigachev /* UMCCH:EccCtrl */
126b843c749SSergey Zigachev #define UMCCH_ECCCTRL_ADDR0	(0x00014053 + 0x00000000)
127b843c749SSergey Zigachev #define UMCCH_ECCCTRL_ADDR1	(0x00014053 + 0x00000800)
128b843c749SSergey Zigachev #define UMCCH_ECCCTRL_ADDR2	(0x00014053 + 0x00001000)
129b843c749SSergey Zigachev #define UMCCH_ECCCTRL_ADDR3	(0x00014053 + 0x00001800)
130b843c749SSergey Zigachev #define UMCCH_ECCCTRL_ADDR4	(0x00054053 + 0x00000000)
131b843c749SSergey Zigachev #define UMCCH_ECCCTRL_ADDR5	(0x00054053 + 0x00000800)
132b843c749SSergey Zigachev #define UMCCH_ECCCTRL_ADDR6	(0x00054053 + 0x00001000)
133b843c749SSergey Zigachev #define UMCCH_ECCCTRL_ADDR7	(0x00054053 + 0x00001800)
134b843c749SSergey Zigachev #define UMCCH_ECCCTRL_ADDR8	(0x00094053 + 0x00000000)
135b843c749SSergey Zigachev #define UMCCH_ECCCTRL_ADDR9	(0x00094053 + 0x00000800)
136b843c749SSergey Zigachev #define UMCCH_ECCCTRL_ADDR10	(0x00094053 + 0x00001000)
137b843c749SSergey Zigachev #define UMCCH_ECCCTRL_ADDR11	(0x00094053 + 0x00001800)
138b843c749SSergey Zigachev #define UMCCH_ECCCTRL_ADDR12	(0x000d4053 + 0x00000000)
139b843c749SSergey Zigachev #define UMCCH_ECCCTRL_ADDR13	(0x000d4053 + 0x00000800)
140b843c749SSergey Zigachev #define UMCCH_ECCCTRL_ADDR14	(0x000d4053 + 0x00001000)
141b843c749SSergey Zigachev #define UMCCH_ECCCTRL_ADDR15	(0x000d4053 + 0x00001800)
142b843c749SSergey Zigachev 
/* UMCCH:UmcLocalCap register address for each of the 16 UMC channel instances. */
static const uint32_t ecc_umclocalcap_addrs[] = {
	UMCLOCALCAPS_ADDR0,
	UMCLOCALCAPS_ADDR1,
	UMCLOCALCAPS_ADDR2,
	UMCLOCALCAPS_ADDR3,
	UMCLOCALCAPS_ADDR4,
	UMCLOCALCAPS_ADDR5,
	UMCLOCALCAPS_ADDR6,
	UMCLOCALCAPS_ADDR7,
	UMCLOCALCAPS_ADDR8,
	UMCLOCALCAPS_ADDR9,
	UMCLOCALCAPS_ADDR10,
	UMCLOCALCAPS_ADDR11,
	UMCLOCALCAPS_ADDR12,
	UMCLOCALCAPS_ADDR13,
	UMCLOCALCAPS_ADDR14,
	UMCLOCALCAPS_ADDR15,
};
161b843c749SSergey Zigachev 
/* UMCCH:UMC_CONFIG register address for each of the 16 UMC channel instances. */
static const uint32_t ecc_umcch_umc_config_addrs[] = {
	UMCCH_UMC_CONFIG_ADDR0,
	UMCCH_UMC_CONFIG_ADDR1,
	UMCCH_UMC_CONFIG_ADDR2,
	UMCCH_UMC_CONFIG_ADDR3,
	UMCCH_UMC_CONFIG_ADDR4,
	UMCCH_UMC_CONFIG_ADDR5,
	UMCCH_UMC_CONFIG_ADDR6,
	UMCCH_UMC_CONFIG_ADDR7,
	UMCCH_UMC_CONFIG_ADDR8,
	UMCCH_UMC_CONFIG_ADDR9,
	UMCCH_UMC_CONFIG_ADDR10,
	UMCCH_UMC_CONFIG_ADDR11,
	UMCCH_UMC_CONFIG_ADDR12,
	UMCCH_UMC_CONFIG_ADDR13,
	UMCCH_UMC_CONFIG_ADDR14,
	UMCCH_UMC_CONFIG_ADDR15,
};
180b843c749SSergey Zigachev 
/* UMCCH:EccCtrl register address for each of the 16 UMC channel instances. */
static const uint32_t ecc_umcch_eccctrl_addrs[] = {
	UMCCH_ECCCTRL_ADDR0,
	UMCCH_ECCCTRL_ADDR1,
	UMCCH_ECCCTRL_ADDR2,
	UMCCH_ECCCTRL_ADDR3,
	UMCCH_ECCCTRL_ADDR4,
	UMCCH_ECCCTRL_ADDR5,
	UMCCH_ECCCTRL_ADDR6,
	UMCCH_ECCCTRL_ADDR7,
	UMCCH_ECCCTRL_ADDR8,
	UMCCH_ECCCTRL_ADDR9,
	UMCCH_ECCCTRL_ADDR10,
	UMCCH_ECCCTRL_ADDR11,
	UMCCH_ECCCTRL_ADDR12,
	UMCCH_ECCCTRL_ADDR13,
	UMCCH_ECCCTRL_ADDR14,
	UMCCH_ECCCTRL_ADDR15,
};
199b843c749SSergey Zigachev 
gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device * adev,struct amdgpu_irq_src * src,unsigned type,enum amdgpu_interrupt_state state)200b843c749SSergey Zigachev static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
201b843c749SSergey Zigachev 					struct amdgpu_irq_src *src,
202b843c749SSergey Zigachev 					unsigned type,
203b843c749SSergey Zigachev 					enum amdgpu_interrupt_state state)
204b843c749SSergey Zigachev {
205b843c749SSergey Zigachev 	struct amdgpu_vmhub *hub;
206b843c749SSergey Zigachev 	u32 tmp, reg, bits, i, j;
207b843c749SSergey Zigachev 
208b843c749SSergey Zigachev 	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
209b843c749SSergey Zigachev 		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
210b843c749SSergey Zigachev 		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
211b843c749SSergey Zigachev 		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
212b843c749SSergey Zigachev 		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
213b843c749SSergey Zigachev 		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
214b843c749SSergey Zigachev 		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
215b843c749SSergey Zigachev 
216b843c749SSergey Zigachev 	switch (state) {
217b843c749SSergey Zigachev 	case AMDGPU_IRQ_STATE_DISABLE:
218b843c749SSergey Zigachev 		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
219b843c749SSergey Zigachev 			hub = &adev->vmhub[j];
220b843c749SSergey Zigachev 			for (i = 0; i < 16; i++) {
221b843c749SSergey Zigachev 				reg = hub->vm_context0_cntl + i;
222b843c749SSergey Zigachev 				tmp = RREG32(reg);
223b843c749SSergey Zigachev 				tmp &= ~bits;
224b843c749SSergey Zigachev 				WREG32(reg, tmp);
225b843c749SSergey Zigachev 			}
226b843c749SSergey Zigachev 		}
227b843c749SSergey Zigachev 		break;
228b843c749SSergey Zigachev 	case AMDGPU_IRQ_STATE_ENABLE:
229b843c749SSergey Zigachev 		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
230b843c749SSergey Zigachev 			hub = &adev->vmhub[j];
231b843c749SSergey Zigachev 			for (i = 0; i < 16; i++) {
232b843c749SSergey Zigachev 				reg = hub->vm_context0_cntl + i;
233b843c749SSergey Zigachev 				tmp = RREG32(reg);
234b843c749SSergey Zigachev 				tmp |= bits;
235b843c749SSergey Zigachev 				WREG32(reg, tmp);
236b843c749SSergey Zigachev 			}
237b843c749SSergey Zigachev 		}
238b843c749SSergey Zigachev 	default:
239b843c749SSergey Zigachev 		break;
240b843c749SSergey Zigachev 	}
241b843c749SSergey Zigachev 
242b843c749SSergey Zigachev 	return 0;
243b843c749SSergey Zigachev }
244b843c749SSergey Zigachev 
gmc_v9_0_process_interrupt(struct amdgpu_device * adev,struct amdgpu_irq_src * source,struct amdgpu_iv_entry * entry)245b843c749SSergey Zigachev static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
246b843c749SSergey Zigachev 				struct amdgpu_irq_src *source,
247b843c749SSergey Zigachev 				struct amdgpu_iv_entry *entry)
248b843c749SSergey Zigachev {
249b843c749SSergey Zigachev 	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
250b843c749SSergey Zigachev 	uint32_t status = 0;
251b843c749SSergey Zigachev 	u64 addr;
252b843c749SSergey Zigachev 
253b843c749SSergey Zigachev 	addr = (u64)entry->src_data[0] << 12;
254b843c749SSergey Zigachev 	addr |= ((u64)entry->src_data[1] & 0xf) << 44;
255b843c749SSergey Zigachev 
256b843c749SSergey Zigachev 	if (!amdgpu_sriov_vf(adev)) {
257b843c749SSergey Zigachev 		status = RREG32(hub->vm_l2_pro_fault_status);
258b843c749SSergey Zigachev 		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
259b843c749SSergey Zigachev 	}
260b843c749SSergey Zigachev 
261b843c749SSergey Zigachev 	if (printk_ratelimit()) {
262b843c749SSergey Zigachev 		struct amdgpu_task_info task_info = { 0 };
263b843c749SSergey Zigachev 
264b843c749SSergey Zigachev 		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
265b843c749SSergey Zigachev 
266b843c749SSergey Zigachev 		dev_err(adev->dev,
267b843c749SSergey Zigachev 			"[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d\n)\n",
268b843c749SSergey Zigachev 			entry->vmid_src ? "mmhub" : "gfxhub",
269b843c749SSergey Zigachev 			entry->src_id, entry->ring_id, entry->vmid,
270b843c749SSergey Zigachev 			entry->pasid, task_info.process_name, task_info.tgid,
271b843c749SSergey Zigachev 			task_info.task_name, task_info.pid);
272b843c749SSergey Zigachev 		dev_err(adev->dev, "  at address 0x%016llx from %d\n",
273b843c749SSergey Zigachev 			addr, entry->client_id);
274b843c749SSergey Zigachev 		if (!amdgpu_sriov_vf(adev))
275b843c749SSergey Zigachev 			dev_err(adev->dev,
276b843c749SSergey Zigachev 				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
277b843c749SSergey Zigachev 				status);
278b843c749SSergey Zigachev 	}
279b843c749SSergey Zigachev 
280b843c749SSergey Zigachev 	return 0;
281b843c749SSergey Zigachev }
282b843c749SSergey Zigachev 
/* VM fault interrupt source callbacks (state toggle + fault handler). */
static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};
287b843c749SSergey Zigachev 
gmc_v9_0_set_irq_funcs(struct amdgpu_device * adev)288b843c749SSergey Zigachev static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
289b843c749SSergey Zigachev {
290b843c749SSergey Zigachev 	adev->gmc.vm_fault.num_types = 1;
291b843c749SSergey Zigachev 	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
292b843c749SSergey Zigachev }
293b843c749SSergey Zigachev 
/*
 * Build a VM_INVALIDATE_ENG*_REQ register value that requests a legacy
 * (FLUSH_TYPE 0) invalidation of all cached translations — L1 PTEs plus
 * L2 PTEs and PDE levels 0-2 — for the given @vmid.
 */
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
{
	u32 req = 0;

	/* invalidate using legacy mode on vmid*/
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	/* leave any latched protection fault status untouched */
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR,	0);

	return req;
}
312b843c749SSergey Zigachev 
313b843c749SSergey Zigachev /*
314b843c749SSergey Zigachev  * GART
315b843c749SSergey Zigachev  * VMID 0 is the physical GPU addresses as used by the kernel.
316b843c749SSergey Zigachev  * VMIDs 1-15 are used for userspace clients and are handled
317b843c749SSergey Zigachev  * by the amdgpu vm/hsa code.
318b843c749SSergey Zigachev  */
319b843c749SSergey Zigachev 
320b843c749SSergey Zigachev /**
321b843c749SSergey Zigachev  * gmc_v9_0_flush_gpu_tlb - gart tlb flush callback
322b843c749SSergey Zigachev  *
323b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
324b843c749SSergey Zigachev  * @vmid: vm instance to flush
325b843c749SSergey Zigachev  *
326b843c749SSergey Zigachev  * Flush the TLB for the requested page table.
327b843c749SSergey Zigachev  */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned i, j;

	/* serialize invalidation requests across all hubs */
	spin_lock(&adev->gmc.invalidate_lock);

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);

		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

		/* Busy wait for ACK.*/
		for (j = 0; j < 100; j++) {
			/* ACK register has one bit per vmid */
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			cpu_relax();
		}
		if (j < 100)
			continue;	/* ACKed quickly; next hub */

		/* Wait for ACK with a delay.*/
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			udelay(1);
		}
		if (j < adev->usec_timeout)
			continue;	/* ACKed within the slow timeout */

		/* no ACK at all — log and still try the remaining hubs */
		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}

	spin_unlock(&adev->gmc.invalidate_lock);
}
370b843c749SSergey Zigachev 
/*
 * Emit a TLB flush on @ring for @vmid: write the page directory address
 * (with PDE flags applied) into the hub's per-context PTB registers, then
 * emit a write of the invalidate request followed by a wait on the
 * matching ACK bit, all as ring packets.  Returns the flagged PD address.
 */
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid);
	uint64_t flags = AMDGPU_PTE_VALID;
	/* each ring owns a dedicated invalidation engine */
	unsigned eng = ring->vm_inv_eng;

	amdgpu_gmc_get_vm_pde(adev, -1, &pd_addr, &flags);
	pd_addr |= flags;

	/* PTB address registers are laid out as lo/hi pairs, 2 dwords per vmid */
	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	return pd_addr;
}
395b843c749SSergey Zigachev 
gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring * ring,unsigned vmid,unsigned pasid)396b843c749SSergey Zigachev static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
397b843c749SSergey Zigachev 					unsigned pasid)
398b843c749SSergey Zigachev {
399b843c749SSergey Zigachev 	struct amdgpu_device *adev = ring->adev;
400b843c749SSergey Zigachev 	uint32_t reg;
401b843c749SSergey Zigachev 
402b843c749SSergey Zigachev 	if (ring->funcs->vmhub == AMDGPU_GFXHUB)
403b843c749SSergey Zigachev 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
404b843c749SSergey Zigachev 	else
405b843c749SSergey Zigachev 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
406b843c749SSergey Zigachev 
407b843c749SSergey Zigachev 	amdgpu_ring_emit_wreg(ring, reg, pasid);
408b843c749SSergey Zigachev }
409b843c749SSergey Zigachev 
410b843c749SSergey Zigachev /**
411b843c749SSergey Zigachev  * gmc_v9_0_set_pte_pde - update the page tables using MMIO
412b843c749SSergey Zigachev  *
413b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
414b843c749SSergey Zigachev  * @cpu_pt_addr: cpu address of the page table
415b843c749SSergey Zigachev  * @gpu_page_idx: entry in the page table to update
416b843c749SSergey Zigachev  * @addr: dst addr to write into pte/pde
417b843c749SSergey Zigachev  * @flags: access flags
418b843c749SSergey Zigachev  *
419b843c749SSergey Zigachev  * Update the page tables using the CPU.
420b843c749SSergey Zigachev  */
static int gmc_v9_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
				uint32_t gpu_page_idx, uint64_t addr,
				uint64_t flags)
{
	void __iomem *table = (void *)cpu_pt_addr;
	uint64_t pte;

	/*
	 * PTE format on VEGA 10:
	 * 63:59 reserved
	 * 58:57 mtype
	 * 56 F
	 * 55 L
	 * 54 P
	 * 53 SW
	 * 52 T
	 * 50:48 reserved
	 * 47:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 Z
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VEGA 10:
	 * 63:59 block fragment size
	 * 58:55 reserved
	 * 54 P
	 * 53:48 reserved
	 * 47:6 physical base address of PD or PTE
	 * 5:3 reserved
	 * 2 C
	 * 1 system
	 * 0 valid
	 */

	/*
	 * Only PTEs are written here — GART does not use PDEs.  Keep the
	 * 4k-aligned page address bits (47:12) and merge in the flag bits.
	 */
	pte = (addr & 0x0000FFFFFFFFF000ULL) | flags;
	writeq(pte, table + (gpu_page_idx * 8));
	return 0;
}
468b843c749SSergey Zigachev 
gmc_v9_0_get_vm_pte_flags(struct amdgpu_device * adev,uint32_t flags)469b843c749SSergey Zigachev static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
470b843c749SSergey Zigachev 						uint32_t flags)
471b843c749SSergey Zigachev 
472b843c749SSergey Zigachev {
473b843c749SSergey Zigachev 	uint64_t pte_flag = 0;
474b843c749SSergey Zigachev 
475b843c749SSergey Zigachev 	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
476b843c749SSergey Zigachev 		pte_flag |= AMDGPU_PTE_EXECUTABLE;
477b843c749SSergey Zigachev 	if (flags & AMDGPU_VM_PAGE_READABLE)
478b843c749SSergey Zigachev 		pte_flag |= AMDGPU_PTE_READABLE;
479b843c749SSergey Zigachev 	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
480b843c749SSergey Zigachev 		pte_flag |= AMDGPU_PTE_WRITEABLE;
481b843c749SSergey Zigachev 
482b843c749SSergey Zigachev 	switch (flags & AMDGPU_VM_MTYPE_MASK) {
483b843c749SSergey Zigachev 	case AMDGPU_VM_MTYPE_DEFAULT:
484b843c749SSergey Zigachev 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
485b843c749SSergey Zigachev 		break;
486b843c749SSergey Zigachev 	case AMDGPU_VM_MTYPE_NC:
487b843c749SSergey Zigachev 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
488b843c749SSergey Zigachev 		break;
489b843c749SSergey Zigachev 	case AMDGPU_VM_MTYPE_WC:
490b843c749SSergey Zigachev 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
491b843c749SSergey Zigachev 		break;
492b843c749SSergey Zigachev 	case AMDGPU_VM_MTYPE_CC:
493b843c749SSergey Zigachev 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
494b843c749SSergey Zigachev 		break;
495b843c749SSergey Zigachev 	case AMDGPU_VM_MTYPE_UC:
496b843c749SSergey Zigachev 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
497b843c749SSergey Zigachev 		break;
498b843c749SSergey Zigachev 	default:
499b843c749SSergey Zigachev 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
500b843c749SSergey Zigachev 		break;
501b843c749SSergey Zigachev 	}
502b843c749SSergey Zigachev 
503b843c749SSergey Zigachev 	if (flags & AMDGPU_VM_PAGE_PRT)
504b843c749SSergey Zigachev 		pte_flag |= AMDGPU_PTE_PRT;
505b843c749SSergey Zigachev 
506b843c749SSergey Zigachev 	return pte_flag;
507b843c749SSergey Zigachev }
508b843c749SSergey Zigachev 
/*
 * Adjust a page directory entry for the hardware: rebase *addr from the
 * CPU-side VRAM window into the GPU address space (unless the entry is
 * flagged AMDGPU_PDE_PTE) and, when further translation is enabled, fix
 * up the per-level fragment/translate-further flag bits.
 */
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	/* address must fit in bits 47:6 — anything else is a driver bug */
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		/* at PDB0 a PTE-style entry drops the PDE_PTE marker,
		 * otherwise the translate-further bit is set */
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}
532b843c749SSergey Zigachev 
/* GMC v9 callbacks installed into adev->gmc.gmc_funcs. */
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.set_pte_pde = gmc_v9_0_set_pte_pde,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};
541b843c749SSergey Zigachev 
gmc_v9_0_set_gmc_funcs(struct amdgpu_device * adev)542b843c749SSergey Zigachev static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
543b843c749SSergey Zigachev {
544b843c749SSergey Zigachev 	if (adev->gmc.gmc_funcs == NULL)
545b843c749SSergey Zigachev 		adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
546b843c749SSergey Zigachev }
547b843c749SSergey Zigachev 
gmc_v9_0_early_init(void * handle)548b843c749SSergey Zigachev static int gmc_v9_0_early_init(void *handle)
549b843c749SSergey Zigachev {
550b843c749SSergey Zigachev 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
551b843c749SSergey Zigachev 
552b843c749SSergey Zigachev 	gmc_v9_0_set_gmc_funcs(adev);
553b843c749SSergey Zigachev 	gmc_v9_0_set_irq_funcs(adev);
554b843c749SSergey Zigachev 
555b843c749SSergey Zigachev 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
556b843c749SSergey Zigachev 	adev->gmc.shared_aperture_end =
557b843c749SSergey Zigachev 		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
558b843c749SSergey Zigachev 	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
559b843c749SSergey Zigachev 	adev->gmc.private_aperture_end =
560b843c749SSergey Zigachev 		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
561b843c749SSergey Zigachev 
562b843c749SSergey Zigachev 	return 0;
563b843c749SSergey Zigachev }
564b843c749SSergey Zigachev 
/*
 * gmc_v9_0_ecc_available - check whether ECC is usable on all UMC channels.
 *
 * Walks three per-channel register arrays and counts every failed
 * prerequisite in 'lost_sheep':
 *   1. UmcLocalCap.EccDis must be clear,
 *   2. UMC_CONFIG.DramReady must be set,
 *   3. EccCtrl.WrEccEn and EccCtrl.RdEccEn must both be set.
 *
 * Returns 1 when every check passed (ECC fully enabled), 0 otherwise.
 * Note: this function never returns a negative value, even though the
 * caller in gmc_v9_0_late_init also tests for an error code.
 */
static int gmc_v9_0_ecc_available(struct amdgpu_device *adev)
{
	uint32_t reg_val;
	uint32_t reg_addr;
	uint32_t field_val;
	size_t i;
	uint32_t fv2;		/* second field read from the same register */
	size_t lost_sheep;	/* count of failed ECC prerequisites */

	DRM_DEBUG("ecc: gmc_v9_0_ecc_available()\n");

	lost_sheep = 0;
	/* 1) ECC must not be fused/configured off in the local capability. */
	for (i = 0; i < ARRAY_SIZE(ecc_umclocalcap_addrs); ++i) {
		reg_addr = ecc_umclocalcap_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH_UmcLocalCap[%zu]: reg_addr: 0x%08x\n",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UmcLocalCap,
					  EccDis);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "EccDis: 0x%08x, ",
			  reg_val, field_val);
		if (field_val) {
			DRM_ERROR("ecc: UmcLocalCap:EccDis is set.\n");
			++lost_sheep;
		}
	}

	/* 2) The DRAM behind each channel must report ready. */
	for (i = 0; i < ARRAY_SIZE(ecc_umcch_umc_config_addrs); ++i) {
		reg_addr = ecc_umcch_umc_config_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH0_0_UMC_CONFIG[%zu]: reg_addr: 0x%08x",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UMC_CONFIG,
					  DramReady);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "DramReady: 0x%08x\n",
			  reg_val, field_val);

		if (!field_val) {
			DRM_ERROR("ecc: UMC_CONFIG:DramReady is not set.\n");
			++lost_sheep;
		}
	}

	/* 3) Both write-path and read-path ECC must be enabled. */
	for (i = 0; i < ARRAY_SIZE(ecc_umcch_eccctrl_addrs); ++i) {
		reg_addr = ecc_umcch_eccctrl_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH_EccCtrl[%zu]: reg_addr: 0x%08x, ",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
					  WrEccEn);
		fv2 = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
				    RdEccEn);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "WrEccEn: 0x%08x, "
			  "RdEccEn: 0x%08x\n",
			  reg_val, field_val, fv2);

		if (!field_val) {
			DRM_DEBUG("ecc: WrEccEn is not set\n");
			++lost_sheep;
		}
		if (!fv2) {
			DRM_DEBUG("ecc: RdEccEn is not set\n");
			++lost_sheep;
		}
	}

	DRM_DEBUG("ecc: lost_sheep: %zu\n", lost_sheep);
	return lost_sheep == 0;
}
643b843c749SSergey Zigachev 
/*
 * gmc_v9_0_late_init - late IP-block init.
 *
 * Assigns a VM invalidation engine to every ring, checks ECC state on
 * bare-metal Vega10, and enables the VM fault interrupt.
 * Returns 0 on success or a negative error code.
 */
static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/*
	 * The latest engine allocation on gfx9 is:
	 * Engine 0, 1: idle
	 * Engine 2, 3: firmware
	 * Engine 4~13: amdgpu ring, subject to change when ring number changes
	 * Engine 14~15: idle
	 * Engine 16: kfd tlb invalidation
	 * Engine 17: Gart flushes
	 */
	/* Next free invalidation engine per hub; rings start at engine 4. */
	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
	unsigned i;
	int r;

	/*
	 * TODO - Uncomment once GART corruption issue is fixed.
	 */
	/* amdgpu_bo_late_init(adev); */

	/* Hand each ring its own invalidation engine on its hub. */
	for(i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		unsigned vmhub = ring->funcs->vmhub;

		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
		dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
			 ring->idx, ring->name, ring->vm_inv_eng,
			 ring->funcs->vmhub);
	}

	/* Engine 16 is used for KFD and 17 for GART flushes */
	for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
		BUG_ON(vm_inv_eng[i] > 16);

	/* ECC is only probed on bare-metal Vega10; VFs can't read the regs. */
	if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
		r = gmc_v9_0_ecc_available(adev);
		if (r == 1) {
			DRM_INFO("ECC is active.\n");
		} else if (r == 0) {
			DRM_INFO("ECC is not present.\n");
			adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
		} else {
			/* NOTE(review): gmc_v9_0_ecc_available() only returns
			 * 0 or 1 today, so this branch looks unreachable. */
			DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
			return r;
		}
	}

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
694b843c749SSergey Zigachev 
gmc_v9_0_vram_gtt_location(struct amdgpu_device * adev,struct amdgpu_gmc * mc)695b843c749SSergey Zigachev static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
696b843c749SSergey Zigachev 					struct amdgpu_gmc *mc)
697b843c749SSergey Zigachev {
698b843c749SSergey Zigachev 	u64 base = 0;
699b843c749SSergey Zigachev 	if (!amdgpu_sriov_vf(adev))
700b843c749SSergey Zigachev 		base = mmhub_v1_0_get_fb_location(adev);
701b843c749SSergey Zigachev 	amdgpu_device_vram_location(adev, &adev->gmc, base);
702b843c749SSergey Zigachev 	amdgpu_device_gart_location(adev, mc);
703b843c749SSergey Zigachev 	/* base offset of vram pages */
704b843c749SSergey Zigachev 	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
705b843c749SSergey Zigachev }
706b843c749SSergey Zigachev 
707b843c749SSergey Zigachev /**
708b843c749SSergey Zigachev  * gmc_v9_0_mc_init - initialize the memory controller driver params
709b843c749SSergey Zigachev  *
710b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
711b843c749SSergey Zigachev  *
712b843c749SSergey Zigachev  * Look up the amount of vram, vram width, and decide how to place
713b843c749SSergey Zigachev  * vram and gart within the GPU's physical address space.
714b843c749SSergey Zigachev  * Returns 0 for success.
715b843c749SSergey Zigachev  */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int chansize, numchan;
	int r;

	/* Prefer the VBIOS-reported width; fall back to channel math. */
	if (amdgpu_emu_mode != 1)
		adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
	if (!adev->gmc.vram_width) {
		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df_funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	/* size in MB on si */
	adev->gmc.mc_vram_size =
		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	/* dGPU only: try to grow BAR0 to cover all of VRAM. */
	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	/* APUs carve VRAM out of system memory; use the MC offset instead
	 * of the PCI BAR. */
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size: -1 means "pick a per-ASIC default" */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}
779b843c749SSergey Zigachev 
gmc_v9_0_gart_init(struct amdgpu_device * adev)780b843c749SSergey Zigachev static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
781b843c749SSergey Zigachev {
782b843c749SSergey Zigachev 	int r;
783b843c749SSergey Zigachev 
784b843c749SSergey Zigachev 	if (adev->gart.robj) {
785b843c749SSergey Zigachev 		WARN(1, "VEGA10 PCIE GART already initialized\n");
786b843c749SSergey Zigachev 		return 0;
787b843c749SSergey Zigachev 	}
788b843c749SSergey Zigachev 	/* Initialize common gart structure */
789b843c749SSergey Zigachev 	r = amdgpu_gart_init(adev);
790b843c749SSergey Zigachev 	if (r)
791b843c749SSergey Zigachev 		return r;
792b843c749SSergey Zigachev 	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
793b843c749SSergey Zigachev 	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
794b843c749SSergey Zigachev 				 AMDGPU_PTE_EXECUTABLE;
795b843c749SSergey Zigachev 	return amdgpu_gart_table_vram_alloc(adev);
796b843c749SSergey Zigachev }
797b843c749SSergey Zigachev 
/*
 * gmc_v9_0_get_vbios_fb_size - size of the VRAM region to reserve for the
 * pre-OS (VBIOS/VGA) framebuffer.
 *
 * The upstream register-based computation is disabled (#if 0) in this
 * port; a fixed 64 MB reservation is returned instead as a workaround.
 */
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
#if 0
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
#endif
	unsigned size;

	/*
	 * TODO Remove once GART corruption is resolved
	 * Check related code in gmc_v9_0_sw_fini
	 * */
	size = 64 * 1024 * 1024;

#if 0
	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport;

		switch (adev->asic_type) {
		case CHIP_RAVEN:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;

#endif
	return size;
}
843b843c749SSergey Zigachev 
gmc_v9_0_sw_init(void * handle)844b843c749SSergey Zigachev static int gmc_v9_0_sw_init(void *handle)
845b843c749SSergey Zigachev {
846b843c749SSergey Zigachev 	int r;
847b843c749SSergey Zigachev 	int dma_bits;
848b843c749SSergey Zigachev 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
849b843c749SSergey Zigachev 
850b843c749SSergey Zigachev 	gfxhub_v1_0_init(adev);
851b843c749SSergey Zigachev 	mmhub_v1_0_init(adev);
852b843c749SSergey Zigachev 
85378973132SSergey Zigachev 	spin_init(&adev->gmc.invalidate_lock, "aggmcil");
854b843c749SSergey Zigachev 
855b843c749SSergey Zigachev 	adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
856b843c749SSergey Zigachev 	switch (adev->asic_type) {
857b843c749SSergey Zigachev 	case CHIP_RAVEN:
858b843c749SSergey Zigachev 		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
859b843c749SSergey Zigachev 			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
860b843c749SSergey Zigachev 		} else {
861b843c749SSergey Zigachev 			/* vm_size is 128TB + 512GB for legacy 3-level page support */
862b843c749SSergey Zigachev 			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
863b843c749SSergey Zigachev 			adev->gmc.translate_further =
864b843c749SSergey Zigachev 				adev->vm_manager.num_level > 1;
865b843c749SSergey Zigachev 		}
866b843c749SSergey Zigachev 		break;
867b843c749SSergey Zigachev 	case CHIP_VEGA10:
868b843c749SSergey Zigachev 	case CHIP_VEGA12:
869b843c749SSergey Zigachev 	case CHIP_VEGA20:
870b843c749SSergey Zigachev 		/*
871b843c749SSergey Zigachev 		 * To fulfill 4-level page support,
872b843c749SSergey Zigachev 		 * vm size is 256TB (48bit), maximum size of Vega10,
873b843c749SSergey Zigachev 		 * block size 512 (9bit)
874b843c749SSergey Zigachev 		 */
875b843c749SSergey Zigachev 		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
876b843c749SSergey Zigachev 		break;
877b843c749SSergey Zigachev 	default:
878b843c749SSergey Zigachev 		break;
879b843c749SSergey Zigachev 	}
880b843c749SSergey Zigachev 
881b843c749SSergey Zigachev 	/* This interrupt is VMC page fault.*/
882b843c749SSergey Zigachev 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
883b843c749SSergey Zigachev 				&adev->gmc.vm_fault);
884b843c749SSergey Zigachev 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
885b843c749SSergey Zigachev 				&adev->gmc.vm_fault);
886b843c749SSergey Zigachev 
887b843c749SSergey Zigachev 	if (r)
888b843c749SSergey Zigachev 		return r;
889b843c749SSergey Zigachev 
890b843c749SSergey Zigachev 	/* Set the internal MC address mask
891b843c749SSergey Zigachev 	 * This is the max address of the GPU's
892b843c749SSergey Zigachev 	 * internal address space.
893b843c749SSergey Zigachev 	 */
894b843c749SSergey Zigachev 	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
895b843c749SSergey Zigachev 
896b843c749SSergey Zigachev 	/* set DMA mask + need_dma32 flags.
897b843c749SSergey Zigachev 	 * PCIE - can handle 44-bits.
898b843c749SSergey Zigachev 	 * IGP - can handle 44-bits
899b843c749SSergey Zigachev 	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
900b843c749SSergey Zigachev 	 */
901b843c749SSergey Zigachev 	adev->need_dma32 = false;
902b843c749SSergey Zigachev 	dma_bits = adev->need_dma32 ? 32 : 44;
903b843c749SSergey Zigachev 	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
904b843c749SSergey Zigachev 	if (r) {
905b843c749SSergey Zigachev 		adev->need_dma32 = true;
906b843c749SSergey Zigachev 		dma_bits = 32;
907b843c749SSergey Zigachev 		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
908b843c749SSergey Zigachev 	}
909b843c749SSergey Zigachev 	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
910b843c749SSergey Zigachev 	if (r) {
911b843c749SSergey Zigachev 		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
912b843c749SSergey Zigachev 		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
913b843c749SSergey Zigachev 	}
91478973132SSergey Zigachev #if 0
915b843c749SSergey Zigachev 	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
91678973132SSergey Zigachev #endif
91778973132SSergey Zigachev 	/* XXX DragonFly: FreeBSD implementation returns false
91878973132SSergey Zigachev 	 * drm-kmod excerpt:
91978973132SSergey Zigachev 	 * Only used in combination with CONFIG_SWIOTLB in v4.17
92078973132SSergey Zigachev 	 * BSDFIXME: Let's say we can dma all physical memory...
92178973132SSergey Zigachev 	 */
92278973132SSergey Zigachev 	adev->need_swiotlb = false;
923b843c749SSergey Zigachev 
924b843c749SSergey Zigachev 	r = gmc_v9_0_mc_init(adev);
925b843c749SSergey Zigachev 	if (r)
926b843c749SSergey Zigachev 		return r;
927b843c749SSergey Zigachev 
928b843c749SSergey Zigachev 	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);
929b843c749SSergey Zigachev 
930b843c749SSergey Zigachev 	/* Memory manager */
931b843c749SSergey Zigachev 	r = amdgpu_bo_init(adev);
932b843c749SSergey Zigachev 	if (r)
933b843c749SSergey Zigachev 		return r;
934b843c749SSergey Zigachev 
935b843c749SSergey Zigachev 	r = gmc_v9_0_gart_init(adev);
936b843c749SSergey Zigachev 	if (r)
937b843c749SSergey Zigachev 		return r;
938b843c749SSergey Zigachev 
939b843c749SSergey Zigachev 	/*
940b843c749SSergey Zigachev 	 * number of VMs
941b843c749SSergey Zigachev 	 * VMID 0 is reserved for System
942b843c749SSergey Zigachev 	 * amdgpu graphics/compute will use VMIDs 1-7
943b843c749SSergey Zigachev 	 * amdkfd will use VMIDs 8-15
944b843c749SSergey Zigachev 	 */
945b843c749SSergey Zigachev 	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
946b843c749SSergey Zigachev 	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
947b843c749SSergey Zigachev 
948b843c749SSergey Zigachev 	amdgpu_vm_manager_init(adev);
949b843c749SSergey Zigachev 
950b843c749SSergey Zigachev 	return 0;
951b843c749SSergey Zigachev }
952b843c749SSergey Zigachev 
/*
 * gmc_v9_0_sw_fini - software teardown, reverse order of gmc_v9_0_sw_init.
 */
static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Release any remaining GEM objects before the VM manager goes away. */
	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

	/*
	* TODO:
	* Currently there is a bug where some memory client outside
	* of the driver writes to first 8M of VRAM on S3 resume,
	* this overrides GART which by default gets placed in first 8M and
	* causes VM_FAULTS once GTT is accessed.
	* Keep the stolen memory reservation until the while this is not solved.
	* Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
	*/
	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);

	/* Free the GART table before the BO and GART managers shut down. */
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}
977b843c749SSergey Zigachev 
gmc_v9_0_init_golden_registers(struct amdgpu_device * adev)978b843c749SSergey Zigachev static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
979b843c749SSergey Zigachev {
980b843c749SSergey Zigachev 
981b843c749SSergey Zigachev 	switch (adev->asic_type) {
982b843c749SSergey Zigachev 	case CHIP_VEGA10:
983b843c749SSergey Zigachev 	case CHIP_VEGA20:
984b843c749SSergey Zigachev 		soc15_program_register_sequence(adev,
985b843c749SSergey Zigachev 						golden_settings_mmhub_1_0_0,
986b843c749SSergey Zigachev 						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
987b843c749SSergey Zigachev 		soc15_program_register_sequence(adev,
988b843c749SSergey Zigachev 						golden_settings_athub_1_0_0,
989b843c749SSergey Zigachev 						ARRAY_SIZE(golden_settings_athub_1_0_0));
990b843c749SSergey Zigachev 		break;
991b843c749SSergey Zigachev 	case CHIP_VEGA12:
992b843c749SSergey Zigachev 		break;
993b843c749SSergey Zigachev 	case CHIP_RAVEN:
994b843c749SSergey Zigachev 		soc15_program_register_sequence(adev,
995b843c749SSergey Zigachev 						golden_settings_athub_1_0_0,
996b843c749SSergey Zigachev 						ARRAY_SIZE(golden_settings_athub_1_0_0));
997b843c749SSergey Zigachev 		break;
998b843c749SSergey Zigachev 	default:
999b843c749SSergey Zigachev 		break;
1000b843c749SSergey Zigachev 	}
1001b843c749SSergey Zigachev }
1002b843c749SSergey Zigachev 
1003b843c749SSergey Zigachev /**
1004b843c749SSergey Zigachev  * gmc_v9_0_restore_registers - restores regs
1005b843c749SSergey Zigachev  *
1006b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1007b843c749SSergey Zigachev  *
1008b843c749SSergey Zigachev  * This restores register values, saved at suspend.
1009b843c749SSergey Zigachev  */
gmc_v9_0_restore_registers(struct amdgpu_device * adev)1010b843c749SSergey Zigachev static void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
1011b843c749SSergey Zigachev {
1012b843c749SSergey Zigachev 	if (adev->asic_type == CHIP_RAVEN)
1013b843c749SSergey Zigachev 		WREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
1014b843c749SSergey Zigachev }
1015b843c749SSergey Zigachev 
1016b843c749SSergey Zigachev /**
1017b843c749SSergey Zigachev  * gmc_v9_0_gart_enable - gart enable
1018b843c749SSergey Zigachev  *
1019b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1020b843c749SSergey Zigachev  */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;	/* fault-retry default pushed to both hubs */
	u32 tmp;

	/* Apply HDP golden settings before touching the hubs. */
	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	/* Raven: set up MMHUB power gating before enabling GART. */
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		mmhub_v1_0_initialize_power_gating(adev);
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	/* Read-back write with no field change — presumably to latch the
	 * register; TODO confirm against HW docs. */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));

	/* After HDP is initialized, flush HDP.*/
	adev->nbio_funcs->hdp_flush(adev, NULL);

	/* Retry faults by default unless the user asked to always stop. */
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	mmhub_v1_0_set_fault_enable_default(adev, value);
	gmc_v9_0_flush_gpu_tlb(adev, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}
1082b843c749SSergey Zigachev 
gmc_v9_0_hw_init(void * handle)1083b843c749SSergey Zigachev static int gmc_v9_0_hw_init(void *handle)
1084b843c749SSergey Zigachev {
1085b843c749SSergey Zigachev 	int r;
1086b843c749SSergey Zigachev 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1087b843c749SSergey Zigachev 
1088b843c749SSergey Zigachev 	/* The sequence of these two function calls matters.*/
1089b843c749SSergey Zigachev 	gmc_v9_0_init_golden_registers(adev);
1090b843c749SSergey Zigachev 
1091b843c749SSergey Zigachev 	if (adev->mode_info.num_crtc) {
1092b843c749SSergey Zigachev 		/* Lockout access through VGA aperture*/
1093b843c749SSergey Zigachev 		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
1094b843c749SSergey Zigachev 
1095b843c749SSergey Zigachev 		/* disable VGA render */
1096b843c749SSergey Zigachev 		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
1097b843c749SSergey Zigachev 	}
1098b843c749SSergey Zigachev 
1099b843c749SSergey Zigachev 	r = gmc_v9_0_gart_enable(adev);
1100b843c749SSergey Zigachev 
1101b843c749SSergey Zigachev 	return r;
1102b843c749SSergey Zigachev }
1103b843c749SSergey Zigachev 
1104b843c749SSergey Zigachev /**
1105b843c749SSergey Zigachev  * gmc_v9_0_save_registers - saves regs
1106b843c749SSergey Zigachev  *
1107b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
1108b843c749SSergey Zigachev  *
1109b843c749SSergey Zigachev  * This saves potential register values that should be
1110b843c749SSergey Zigachev  * restored upon resume
1111b843c749SSergey Zigachev  */
gmc_v9_0_save_registers(struct amdgpu_device * adev)1112b843c749SSergey Zigachev static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
1113b843c749SSergey Zigachev {
1114b843c749SSergey Zigachev 	if (adev->asic_type == CHIP_RAVEN)
1115b843c749SSergey Zigachev 		adev->gmc.sdpif_register = RREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
1116b843c749SSergey Zigachev }
1117b843c749SSergey Zigachev 
/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page table.  Translation is shut off in both
 * hubs first, and only then is the VRAM buffer backing the GART table
 * unpinned, so no client can reference the table while it is still
 * in use.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	mmhub_v1_0_gart_disable(adev);
	/* release the pin on the VRAM BO that holds the GART page table */
	amdgpu_gart_table_vram_unpin(adev);
}
1131b843c749SSergey Zigachev 
gmc_v9_0_hw_fini(void * handle)1132b843c749SSergey Zigachev static int gmc_v9_0_hw_fini(void *handle)
1133b843c749SSergey Zigachev {
1134b843c749SSergey Zigachev 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1135b843c749SSergey Zigachev 
1136b843c749SSergey Zigachev 	if (amdgpu_sriov_vf(adev)) {
1137b843c749SSergey Zigachev 		/* full access mode, so don't touch any GMC register */
1138b843c749SSergey Zigachev 		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
1139b843c749SSergey Zigachev 		return 0;
1140b843c749SSergey Zigachev 	}
1141b843c749SSergey Zigachev 
1142b843c749SSergey Zigachev 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1143b843c749SSergey Zigachev 	gmc_v9_0_gart_disable(adev);
1144b843c749SSergey Zigachev 
1145b843c749SSergey Zigachev 	return 0;
1146b843c749SSergey Zigachev }
1147b843c749SSergey Zigachev 
/**
 * gmc_v9_0_suspend - suspend callback for the GMC v9 IP block
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * Shuts the hardware down via gmc_v9_0_hw_fini() and then saves the
 * registers that must be re-programmed on resume.
 *
 * Returns 0 on success, a negative error code from hw_fini otherwise.
 */
static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	int ret;

	ret = gmc_v9_0_hw_fini(adev);
	if (ret)
		return ret;

	gmc_v9_0_save_registers(adev);

	return 0;
}
1161b843c749SSergey Zigachev 
/**
 * gmc_v9_0_resume - resume callback for the GMC v9 IP block
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * Restores the registers saved at suspend time, re-runs hardware init,
 * and resets all VMIDs so stale VM assignments are dropped.
 *
 * Returns 0 on success, a negative error code from hw_init otherwise.
 */
static int gmc_v9_0_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	int ret;

	/* re-program what gmc_v9_0_save_registers() captured at suspend */
	gmc_v9_0_restore_registers(adev);

	ret = gmc_v9_0_hw_init(adev);
	if (ret)
		return ret;

	amdgpu_vmid_reset_all(adev);

	return 0;
}
1176b843c749SSergey Zigachev 
/* is_idle callback: the memory controller has no busy state to poll
 * on GMC v9, so it is reported as always idle.
 */
static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9.*/
	return true;
}
1182b843c749SSergey Zigachev 
/* wait_for_idle callback: nothing to wait on (see gmc_v9_0_is_idle),
 * so this always succeeds immediately.
 */
static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9.*/
	return 0;
}
1188b843c749SSergey Zigachev 
/* soft_reset callback: intentionally a no-op stub. */
static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation.*/
	return 0;
}
1194b843c749SSergey Zigachev 
/**
 * gmc_v9_0_set_clockgating_state - configure GMC clockgating
 *
 * @handle: amdgpu_device pointer (as void *)
 * @state: requested clockgating state
 *
 * GMC v9 clockgating is handled by the mmhub; the request is simply
 * forwarded there.  Returns the mmhub result.
 */
static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return mmhub_v1_0_set_clockgating((struct amdgpu_device *)handle,
					  state);
}
1202b843c749SSergey Zigachev 
/**
 * gmc_v9_0_get_clockgating_state - report current GMC clockgating flags
 *
 * @handle: amdgpu_device pointer (as void *)
 * @flags: output bitmask filled in by the mmhub
 */
static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	mmhub_v1_0_get_clockgating((struct amdgpu_device *)handle, flags);
}
1209b843c749SSergey Zigachev 
/* set_powergating callback: GMC v9 implements no powergating control,
 * so this is a successful no-op.
 */
static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}
1215b843c749SSergey Zigachev 
/* IP-block callback table for GMC v9, invoked by the amdgpu core
 * during device init/fini, suspend/resume, and power-management
 * transitions.  All entries are populated; none is left NULL.
 */
const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};
1233b843c749SSergey Zigachev 
/* Version descriptor registered with the amdgpu IP-block framework:
 * identifies this driver as the GMC (graphics memory controller)
 * block, version 9.0.0, with the callbacks above.
 */
const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};
1242