1 /*	$NetBSD: amdgpu_gmc_v10_0.c,v 1.5 2021/12/19 12:31:45 riastradh Exp $	*/
2 
3 /*
4  * Copyright 2019 Advanced Micro Devices, Inc.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  */
25 #include <sys/cdefs.h>
26 __KERNEL_RCSID(0, "$NetBSD: amdgpu_gmc_v10_0.c,v 1.5 2021/12/19 12:31:45 riastradh Exp $");
27 
28 #include <linux/firmware.h>
29 #include <linux/pci.h>
30 #include "amdgpu.h"
31 #include "amdgpu_atomfirmware.h"
32 #include "gmc_v10_0.h"
33 
34 #include "hdp/hdp_5_0_0_offset.h"
35 #include "hdp/hdp_5_0_0_sh_mask.h"
36 #include "gc/gc_10_1_0_sh_mask.h"
37 #include "mmhub/mmhub_2_0_0_sh_mask.h"
38 #include "athub/athub_2_0_0_sh_mask.h"
39 #include "athub/athub_2_0_0_offset.h"
40 #include "dcn/dcn_2_0_0_offset.h"
41 #include "dcn/dcn_2_0_0_sh_mask.h"
42 #include "oss/osssys_5_0_0_offset.h"
43 #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
44 #include "navi10_enum.h"
45 
46 #include "soc15.h"
47 #include "soc15d.h"
48 #include "soc15_common.h"
49 
50 #include "nbio_v2_3.h"
51 
52 #include "gfxhub_v2_0.h"
53 #include "mmhub_v2_0.h"
54 #include "athub_v2_0.h"
55 /* XXX Move this macro to navi10 header file, which is like vid.h for VI.*/
56 #define AMDGPU_NUM_OF_VMIDS			8
57 
58 #if 0
59 static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
60 {
61 	/* TODO add golden setting for hdp */
62 };
63 #endif
64 
65 static int
66 gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
67 				   struct amdgpu_irq_src *src, unsigned type,
68 				   enum amdgpu_interrupt_state state)
69 {
70 	struct amdgpu_vmhub *hub;
71 	u32 tmp, reg, bits[AMDGPU_MAX_VMHUBS], i;
72 
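	/* Per-hub mask of every CONTEXT1_CNTL protection-fault interrupt enable bit. */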
73 	bits[AMDGPU_GFXHUB_0] = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
74 		GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
75 		GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
76 		GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
77 		GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
78 		GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
79 		GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
80 
81 	bits[AMDGPU_MMHUB_0] = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
82 		MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
83 		MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
84 		MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
85 		MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
86 		MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
87 		MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
88 
89 	switch (state) {
90 	case AMDGPU_IRQ_STATE_DISABLE:
91 		/* MM HUB */
92 		hub = &adev->vmhub[AMDGPU_MMHUB_0];
93 		for (i = 0; i < 16; i++) {
94 			reg = hub->vm_context0_cntl + i;
95 			tmp = RREG32(reg);
96 			tmp &= ~bits[AMDGPU_MMHUB_0];
97 			WREG32(reg, tmp);
98 		}
99 
100 		/* GFX HUB */
101 		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
102 		for (i = 0; i < 16; i++) {
103 			reg = hub->vm_context0_cntl + i;
104 			tmp = RREG32(reg);
105 			tmp &= ~bits[AMDGPU_GFXHUB_0];
106 			WREG32(reg, tmp);
107 		}
108 		break;
109 	case AMDGPU_IRQ_STATE_ENABLE:
110 		/* MM HUB */
111 		hub = &adev->vmhub[AMDGPU_MMHUB_0];
112 		for (i = 0; i < 16; i++) {
113 			reg = hub->vm_context0_cntl + i;
114 			tmp = RREG32(reg);
115 			tmp |= bits[AMDGPU_MMHUB_0];
116 			WREG32(reg, tmp);
117 		}
118 
119 		/* GFX HUB */
120 		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
121 		for (i = 0; i < 16; i++) {
122 			reg = hub->vm_context0_cntl + i;
123 			tmp = RREG32(reg);
124 			tmp |= bits[AMDGPU_GFXHUB_0];
125 			WREG32(reg, tmp);
126 		}
127 		break;
128 	default:
129 		break;
130 	}
131 
132 	return 0;
133 }
134 
135 static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
136 				       struct amdgpu_irq_src *source,
137 				       struct amdgpu_iv_entry *entry)
138 {
139 	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
140 	uint32_t status = 0;
141 	u64 addr;
142 
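	/*
	 * Reassemble the faulting page address: src_data[0] carries
	 * address bits 43:12 and the low nibble of src_data[1] carries
	 * bits 47:44.
	 */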
143 	addr = (u64)entry->src_data[0] << 12;
144 	addr |= ((u64)entry->src_data[1] & 0xf) << 44;
145 
146 	if (!amdgpu_sriov_vf(adev)) {
147 		/*
148 		 * Issue a dummy read to wait for the status register to
149 		 * be updated to avoid reading an incorrect value due to
150 		 * the new fast GRBM interface.
151 		 */
152 		if (entry->vmid_src == AMDGPU_GFXHUB_0)
153 			RREG32(hub->vm_l2_pro_fault_status);
154 
155 		status = RREG32(hub->vm_l2_pro_fault_status);
156 		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
157 	}
158 
159 	if (printk_ratelimit()) {
160 		struct amdgpu_task_info task_info;
161 
162 		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
163 		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
164 
165 		dev_err(adev->dev,
166 			"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
167 			"for process %s pid %d thread %s pid %d)\n",
168 			entry->vmid_src ? "mmhub" : "gfxhub",
169 			entry->src_id, entry->ring_id, entry->vmid,
170 			entry->pasid, task_info.process_name, task_info.tgid,
171 			task_info.task_name, task_info.pid);
172 		dev_err(adev->dev, "  in page starting at address 0x%016"PRIx64" from client %d\n",
173 			addr, entry->client_id);
174 		if (!amdgpu_sriov_vf(adev)) {
175 			dev_err(adev->dev,
176 				"GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
177 				status);
178 			dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
179 				REG_GET_FIELD(status,
180 				GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
181 			dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
182 				REG_GET_FIELD(status,
183 				GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
184 			dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
185 				REG_GET_FIELD(status,
186 				GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
187 			dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
188 				REG_GET_FIELD(status,
189 				GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
190 			dev_err(adev->dev, "\t RW: 0x%lx\n",
191 				REG_GET_FIELD(status,
192 				GCVM_L2_PROTECTION_FAULT_STATUS, RW));
193 		}
194 	}
195 
196 	return 0;
197 }
198 
199 static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
200 	.set = gmc_v10_0_vm_fault_interrupt_state,
201 	.process = gmc_v10_0_process_interrupt,
202 };
203 
204 static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
205 {
206 	adev->gmc.vm_fault.num_types = 1;
207 	adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;
208 }
209 
210 static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid,
211 					     uint32_t flush_type)
212 {
213 	u32 req = 0;
214 
215 	/* invalidate using legacy mode on vmid*/
216 	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
217 			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
218 	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
219 	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
220 	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
221 	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
222 	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
223 	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
224 	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
225 			    CLEAR_PROTECTION_FAULT_STATUS_ADDR,	0);
226 
227 	return req;
228 }
229 
230 /**
231  * gmc_v10_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
232  *
233  * @adev: amdgpu_device pointer
234  * @vmhub: vmhub type
235  *
236  */
237 static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
238 				       uint32_t vmhub)
239 {
240 	return ((vmhub == AMDGPU_MMHUB_0 ||
241 		 vmhub == AMDGPU_MMHUB_1) &&
242 		(!amdgpu_sriov_vf(adev)));
243 }
244 
245 static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
246 					struct amdgpu_device *adev,
247 					uint8_t vmid, uint16_t *p_pasid)
248 {
249 	uint32_t value;
250 
251 	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
252 		     + vmid);
253 	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
254 
255 	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
256 }
257 
258 /*
259  * GART
260  * VMID 0 is the physical GPU addresses as used by the kernel.
261  * VMIDs 1-15 are used for userspace clients and are handled
262  * by the amdgpu vm/hsa code.
263  */
264 
265 static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
266 				   unsigned int vmhub, uint32_t flush_type)
267 {
268 	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
269 	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
270 	u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type);
271 	u32 tmp;
272 	/* Use register 17 for GART */
273 	const unsigned eng = 17;
274 	unsigned int i;
275 
276 	spin_lock(&adev->gmc.invalidate_lock);
277 	/*
278 	 * The GPU may lose the gpuvm invalidate acknowledge state across a
279 	 * power-gating off cycle, so acquire a semaphore before the
280 	 * invalidation and release it afterwards to avoid entering the
281 	 * power-gated state and work around the issue.
282 	 */
283 
284 	/* TODO: the semaphore path for the GFXHUB still needs further debugging. */
285 	if (use_semaphore) {
286 		for (i = 0; i < adev->usec_timeout; i++) {
287 			/* a read return value of 1 means the semaphore was acquired */
288 			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
289 			if (tmp & 0x1)
290 				break;
291 			udelay(1);
292 		}
293 
294 		if (i >= adev->usec_timeout)
295 			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
296 	}
297 
298 	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
299 
300 	/*
301 	 * Issue a dummy read to wait for the ACK register to be cleared
302 	 * to avoid a false ACK due to the new fast GRBM interface.
303 	 */
304 	if (vmhub == AMDGPU_GFXHUB_0)
305 		RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);
306 
307 	/* Wait for ACK with a delay.*/
308 	for (i = 0; i < adev->usec_timeout; i++) {
309 		tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
310 		tmp &= 1 << vmid;
311 		if (tmp)
312 			break;
313 
314 		udelay(1);
315 	}
316 
317 	/* TODO: the semaphore path for the GFXHUB still needs further debugging. */
318 	if (use_semaphore)
319 		/*
320 		 * Release the semaphore after the invalidation;
321 		 * writing 0 releases it.
322 		 */
323 		WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);
324 
325 	spin_unlock(&adev->gmc.invalidate_lock);
326 
327 	if (i < adev->usec_timeout)
328 		return;
329 
330 	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
331 }
332 
333 /**
334  * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
335  *
336  * @adev: amdgpu_device pointer
337  * @vmid: vm instance to flush
338  *
339  * Flush the TLB for the requested page table.
340  */
341 static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
342 					uint32_t vmhub, uint32_t flush_type)
343 {
344 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
345 	struct dma_fence *fence;
346 	struct amdgpu_job *job;
347 
348 	int r;
349 
350 	/* flush hdp cache */
351 	adev->nbio.funcs->hdp_flush(adev, NULL);
352 
353 	mutex_lock(&adev->mman.gtt_window_lock);
354 
355 	if (vmhub == AMDGPU_MMHUB_0) {
356 		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
357 		mutex_unlock(&adev->mman.gtt_window_lock);
358 		return;
359 	}
360 
361 	BUG_ON(vmhub != AMDGPU_GFXHUB_0);
362 
363 	if (!adev->mman.buffer_funcs_enabled ||
364 	    !adev->ib_pool_ready ||
365 	    adev->in_gpu_reset ||
366 	    ring->sched.ready == false) {
367 		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
368 		mutex_unlock(&adev->mman.gtt_window_lock);
369 		return;
370 	}
371 
372 	/* The SDMA on Navi has a bug which can theoretically result in memory
373 	 * corruption if an invalidation happens at the same time as a VA
374 	 * translation. Avoid this by doing the invalidation from the SDMA
375 	 * itself.
376 	 */
377 	r = amdgpu_job_alloc_with_ib(adev, 16 * 4, &job);
378 	if (r)
379 		goto error_alloc;
380 
381 	job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
382 	job->vm_needs_flush = true;
383 	job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
384 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
385 	r = amdgpu_job_submit(job, &adev->mman.entity,
386 			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
387 	if (r)
388 		goto error_submit;
389 
390 	mutex_unlock(&adev->mman.gtt_window_lock);
391 
392 	dma_fence_wait(fence, false);
393 	dma_fence_put(fence);
394 
395 	return;
396 
397 error_submit:
398 	amdgpu_job_free(job);
399 
400 error_alloc:
401 	mutex_unlock(&adev->mman.gtt_window_lock);
402 	DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
403 }
404 
405 /**
406  * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
407  *
408  * @adev: amdgpu_device pointer
409  * @pasid: pasid to be flushed
410  *
411  * Flush the TLB for the requested pasid.
412  */
413 static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
414 					uint16_t pasid, uint32_t flush_type,
415 					bool all_hub)
416 {
417 	int vmid, i;
418 	signed long r;
419 	uint32_t seq;
420 	uint16_t queried_pasid;
421 	bool ret;
422 	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
423 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
424 
425 	if (amdgpu_emu_mode == 0 && ring->sched.ready) {
426 		spin_lock(&adev->gfx.kiq.ring_lock);
427 		/* 2 dwords flush + 8 dwords fence */
428 		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
429 		kiq->pmf->kiq_invalidate_tlbs(ring,
430 					pasid, flush_type, all_hub);
431 		amdgpu_fence_emit_polling(ring, &seq);
432 		amdgpu_ring_commit(ring);
433 		spin_unlock(&adev->gfx.kiq.ring_lock);
434 		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
435 		if (r < 1) {
436 			DRM_ERROR("wait for kiq fence error: %ld.\n", r);
437 			return -ETIME;
438 		}
439 
440 		return 0;
441 	}
442 
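	/*
	 * KIQ path not usable (emulation mode or ring not ready): look up
	 * which VMIDs currently map this PASID and flush them through the
	 * register path instead.
	 */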
443 	for (vmid = 1; vmid < 16; vmid++) {
444 
445 		ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
446 				&queried_pasid);
447 		if (ret	&& queried_pasid == pasid) {
448 			if (all_hub) {
449 				for (i = 0; i < adev->num_vmhubs; i++)
450 					gmc_v10_0_flush_gpu_tlb(adev, vmid,
451 							i, flush_type);
452 			} else {
453 				gmc_v10_0_flush_gpu_tlb(adev, vmid,
454 						AMDGPU_GFXHUB_0, flush_type);
455 			}
456 			break;
457 		}
458 	}
459 
460 	return 0;
461 }
462 
463 static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
464 					     unsigned vmid, uint64_t pd_addr)
465 {
466 	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
467 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
468 	uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0);
469 	unsigned eng = ring->vm_inv_eng;
470 
471 	/*
472 	 * The GPU may lose the gpuvm invalidate acknowledge state across a
473 	 * power-gating off cycle, so acquire a semaphore before the
474 	 * invalidation and release it afterwards to avoid entering the
475 	 * power-gated state and work around the issue.
476 	 */
477 
478 	/* TODO: the semaphore path for the GFXHUB still needs further debugging. */
479 	if (use_semaphore)
480 		/* a read return value of 1 means the semaphore was acquired */
481 		amdgpu_ring_emit_reg_wait(ring,
482 					  hub->vm_inv_eng0_sem + eng, 0x1, 0x1);
483 
484 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
485 			      lower_32_bits(pd_addr));
486 
487 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
488 			      upper_32_bits(pd_addr));
489 
490 	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
491 					    hub->vm_inv_eng0_ack + eng,
492 					    req, 1 << vmid);
493 
494 	/* TODO: the semaphore path for the GFXHUB still needs further debugging. */
495 	if (use_semaphore)
496 		/*
497 		 * Release the semaphore after the invalidation;
498 		 * writing 0 releases it.
499 		 */
500 		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);
501 
502 	return pd_addr;
503 }
504 
505 static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
506 					 unsigned pasid)
507 {
508 	struct amdgpu_device *adev = ring->adev;
509 	uint32_t reg;
510 
511 	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
512 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
513 	else
514 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
515 
516 	amdgpu_ring_emit_wreg(ring, reg, pasid);
517 }
518 
519 /*
520  * PTE format on NAVI 10:
521  * 63:59 reserved
522  * 58:57 reserved
523  * 56 F
524  * 55 L
525  * 54 reserved
526  * 53:52 SW
527  * 51 T
528  * 50:48 mtype
529  * 47:12 4k physical page base address
530  * 11:7 fragment
531  * 6 write
532  * 5 read
533  * 4 exe
534  * 3 Z
535  * 2 snooped
536  * 1 system
537  * 0 valid
538  *
539  * PDE format on NAVI 10:
540  * 63:59 block fragment size
541  * 58:55 reserved
542  * 54 P
543  * 53:48 reserved
544  * 47:6 physical base address of PD or PTE
545  * 5:3 reserved
546  * 2 C
547  * 1 system
548  * 0 valid
549  */
550 
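/*
 * Illustrative sketch only, kept disabled like the golden-settings table
 * above: how a PTE for a read/write, snooped 4K page could be composed from
 * the fields documented above.  The helper name is hypothetical and is not
 * used by the driver; the AMDGPU_PTE_* and MTYPE_* macros are the existing
 * ones from the amdgpu headers.
 */
#if 0
static uint64_t
gmc_v10_0_example_pte(uint64_t page_base)
{
	/* 4K-aligned physical page base address occupies bits 47:12 */
	uint64_t pte = page_base & 0x0000FFFFFFFFF000ULL;

	pte |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);			/* mtype, bits 50:48 */
	pte |= AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE;	/* bits 5 and 6 */
	pte |= AMDGPU_PTE_SNOOPED;				/* bit 2 */
	pte |= AMDGPU_PTE_VALID;				/* bit 0 */

	return pte;
}
#endif
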
551 static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
552 {
553 	switch (flags) {
554 	case AMDGPU_VM_MTYPE_DEFAULT:
555 		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
556 	case AMDGPU_VM_MTYPE_NC:
557 		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
558 	case AMDGPU_VM_MTYPE_WC:
559 		return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
560 	case AMDGPU_VM_MTYPE_CC:
561 		return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
562 	case AMDGPU_VM_MTYPE_UC:
563 		return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
564 	default:
565 		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
566 	}
567 }
568 
569 static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
570 				 uint64_t *addr, uint64_t *flags)
571 {
572 	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
573 		*addr = adev->vm_manager.vram_base_offset + *addr -
574 			adev->gmc.vram_start;
575 	BUG_ON(*addr & 0xFFFF00000000003FULL);
576 
577 	if (!adev->gmc.translate_further)
578 		return;
579 
580 	if (level == AMDGPU_VM_PDB1) {
581 		/* Set the block fragment size */
582 		if (!(*flags & AMDGPU_PDE_PTE))
583 			*flags |= AMDGPU_PDE_BFS(0x9);
584 
585 	} else if (level == AMDGPU_VM_PDB0) {
586 		if (*flags & AMDGPU_PDE_PTE)
587 			*flags &= ~AMDGPU_PDE_PTE;
588 		else
589 			*flags |= AMDGPU_PTE_TF;
590 	}
591 }
592 
593 static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
594 				 struct amdgpu_bo_va_mapping *mapping,
595 				 uint64_t *flags)
596 {
597 	*flags &= ~AMDGPU_PTE_EXECUTABLE;
598 	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
599 
600 	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
601 	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
602 
603 	if (mapping->flags & AMDGPU_PTE_PRT) {
604 		*flags |= AMDGPU_PTE_PRT;
605 		*flags |= AMDGPU_PTE_SNOOPED;
606 		*flags |= AMDGPU_PTE_LOG;
607 		*flags |= AMDGPU_PTE_SYSTEM;
608 		*flags &= ~AMDGPU_PTE_VALID;
609 	}
610 }
611 
612 static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
613 	.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
614 	.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
615 	.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
616 	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
617 	.map_mtype = gmc_v10_0_map_mtype,
618 	.get_vm_pde = gmc_v10_0_get_vm_pde,
619 	.get_vm_pte = gmc_v10_0_get_vm_pte
620 };
621 
622 static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
623 {
624 	if (adev->gmc.gmc_funcs == NULL)
625 		adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
626 }
627 
628 static int gmc_v10_0_early_init(void *handle)
629 {
630 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
631 
632 	gmc_v10_0_set_gmc_funcs(adev);
633 	gmc_v10_0_set_irq_funcs(adev);
634 
635 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
636 	adev->gmc.shared_aperture_end =
637 		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
638 	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
639 	adev->gmc.private_aperture_end =
640 		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
641 
642 	return 0;
643 }
644 
645 static int gmc_v10_0_late_init(void *handle)
646 {
647 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
648 	int r;
649 
650 	amdgpu_bo_late_init(adev);
651 
652 	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
653 	if (r)
654 		return r;
655 
656 	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
657 }
658 
659 static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
660 					struct amdgpu_gmc *mc)
661 {
662 	u64 base = 0;
663 
664 	base = gfxhub_v2_0_get_fb_location(adev);
665 
666 	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
667 	amdgpu_gmc_gart_location(adev, mc);
668 
669 	/* base offset of vram pages */
670 	adev->vm_manager.vram_base_offset = gfxhub_v2_0_get_mc_fb_offset(adev);
671 }
672 
673 /**
674  * gmc_v10_0_mc_init - initialize the memory controller driver params
675  *
676  * @adev: amdgpu_device pointer
677  *
678  * Look up the amount of vram, vram width, and decide how to place
679  * vram and gart within the GPU's physical address space.
680  * Returns 0 for success.
681  */
682 static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
683 {
684 	/* Could the aperture size report 0? */
685 	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
686 	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
687 
688 #ifdef __NetBSD__
689 	adev->gmc.aper_tag = adev->pdev->pd_pa.pa_memt;
690 #endif
691 
692 	/* the NBIO block reports the VRAM size in MB */
693 	adev->gmc.mc_vram_size =
694 		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
695 	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
696 	adev->gmc.visible_vram_size = adev->gmc.aper_size;
697 
698 	/* In case the PCI BAR is larger than the actual amount of vram */
699 	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
700 		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
701 
702 	/* set the gart size */
703 	if (amdgpu_gart_size == -1) {
704 		switch (adev->asic_type) {
705 		case CHIP_NAVI10:
706 		case CHIP_NAVI14:
707 		case CHIP_NAVI12:
708 		default:
709 			adev->gmc.gart_size = 512ULL << 20;
710 			break;
711 		}
712 	} else
713 		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
714 
715 	gmc_v10_0_vram_gtt_location(adev, &adev->gmc);
716 
717 	return 0;
718 }
719 
720 static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
721 {
722 	int r;
723 
724 	if (adev->gart.bo) {
725 		WARN(1, "NAVI10 PCIE GART already initialized\n");
726 		return 0;
727 	}
728 
729 	/* Initialize common gart structure */
730 	r = amdgpu_gart_init(adev);
731 	if (r)
732 		return r;
733 
734 	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
735 	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
736 				 AMDGPU_PTE_EXECUTABLE;
737 
738 	return amdgpu_gart_table_vram_alloc(adev);
739 }
740 
741 static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
742 {
743 	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
744 	unsigned size;
745 
746 	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
747 		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
748 	} else {
749 		u32 viewport;
750 		u32 pitch;
751 
752 		viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
753 		pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
754 		size = (REG_GET_FIELD(viewport,
755 					HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
756 				REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
757 				4);
758 	}
759 	/* return 0 if the pre-OS buffer uses up most of vram */
760 	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) {
761 		DRM_ERROR("Warning: pre-OS buffer uses most of vram, "
762 			  "be aware of gart table overwrite\n");
763 		return 0;
764 	}
765 
766 	return size;
767 }
768 
769 
770 
771 static int gmc_v10_0_sw_init(void *handle)
772 {
773 	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
774 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
775 
776 	gfxhub_v2_0_init(adev);
777 	mmhub_v2_0_init(adev);
778 
779 	spin_lock_init(&adev->gmc.invalidate_lock);
780 
781 	r = amdgpu_atomfirmware_get_vram_info(adev,
782 		&vram_width, &vram_type, &vram_vendor);
783 	if (!amdgpu_emu_mode)
784 		adev->gmc.vram_width = vram_width;
785 	else
786 		adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
787 
788 	adev->gmc.vram_type = vram_type;
789 	adev->gmc.vram_vendor = vram_vendor;
790 	switch (adev->asic_type) {
791 	case CHIP_NAVI10:
792 	case CHIP_NAVI14:
793 	case CHIP_NAVI12:
794 		adev->num_vmhubs = 2;
795 		/*
796 		 * To support 4-level page tables, the vm size is 256 TB
797 		 * (48 bit), the maximum for Navi10/Navi14/Navi12, with a
798 		 * block size of 512 (9 bit).
799 		 */
800 		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
801 		break;
802 	default:
803 		break;
804 	}
805 
806 	/* This interrupt is the VMC page fault. */
807 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
808 			      VMC_1_0__SRCID__VM_FAULT,
809 			      &adev->gmc.vm_fault);
810 
811 	if (r)
812 		return r;
813 
814 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
815 			      UTCL2_1_0__SRCID__FAULT,
816 			      &adev->gmc.vm_fault);
817 	if (r)
818 		return r;
819 
820 	/*
821 	 * Set the internal MC address mask. This is the max address of the GPU's
822 	 * internal address space.
823 	 */
824 	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
825 
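	/* Limit DMA to 44-bit addresses for system memory accesses. */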
826 #ifdef __NetBSD__
827 	r = drm_limit_dma_space(adev->ddev, 0, DMA_BIT_MASK(44));
828 #else
829 	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
830 #endif
831 	if (r) {
832 		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
833 		return r;
834 	}
835 
836 	r = gmc_v10_0_mc_init(adev);
837 	if (r)
838 		return r;
839 
840 	adev->gmc.stolen_size = gmc_v10_0_get_vbios_fb_size(adev);
841 
842 	/* Memory manager */
843 	r = amdgpu_bo_init(adev);
844 	if (r)
845 		return r;
846 
847 	r = gmc_v10_0_gart_init(adev);
848 	if (r)
849 		return r;
850 
851 	/*
852 	 * number of VMs
853 	 * VMID 0 is reserved for System
854 	 * amdgpu graphics/compute will use VMIDs 1-7
855 	 * amdkfd will use VMIDs 8-15
856 	 */
857 	adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
858 	adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
859 
860 	amdgpu_vm_manager_init(adev);
861 
862 	return 0;
863 }
864 
865 /**
866  * gmc_v10_0_gart_fini - vm fini callback
867  *
868  * @adev: amdgpu_device pointer
869  *
870  * Tears down the driver GART/VM setup.
871  */
872 static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
873 {
874 	amdgpu_gart_table_vram_free(adev);
875 	amdgpu_gart_fini(adev);
876 }
877 
878 static int gmc_v10_0_sw_fini(void *handle)
879 {
880 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
881 
882 	amdgpu_vm_manager_fini(adev);
883 	gmc_v10_0_gart_fini(adev);
884 	amdgpu_gem_force_release(adev);
885 	amdgpu_bo_fini(adev);
886 
887 	spin_lock_destroy(&adev->gmc.invalidate_lock);
888 
889 	return 0;
890 }
891 
892 static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
893 {
894 	switch (adev->asic_type) {
895 	case CHIP_NAVI10:
896 	case CHIP_NAVI14:
897 	case CHIP_NAVI12:
898 		break;
899 	default:
900 		break;
901 	}
902 }
903 
904 /**
905  * gmc_v10_0_gart_enable - gart enable
906  *
907  * @adev: amdgpu_device pointer
908  */
909 static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
910 {
911 	int r;
912 	bool value;
913 	u32 tmp;
914 
915 	if (adev->gart.bo == NULL) {
916 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
917 		return -EINVAL;
918 	}
919 
920 	r = amdgpu_gart_table_vram_pin(adev);
921 	if (r)
922 		return r;
923 
924 	r = gfxhub_v2_0_gart_enable(adev);
925 	if (r)
926 		return r;
927 
928 	r = mmhub_v2_0_gart_enable(adev);
929 	if (r)
930 		return r;
931 
932 	tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
933 	tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
934 	WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
935 
936 	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
937 	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
938 
939 	/* Flush HDP after it is initialized */
940 	adev->nbio.funcs->hdp_flush(adev, NULL);
941 
942 	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
943 		false : true;
944 
945 	gfxhub_v2_0_set_fault_enable_default(adev, value);
946 	mmhub_v2_0_set_fault_enable_default(adev, value);
947 	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
948 	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
949 
950 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
951 		 (unsigned)(adev->gmc.gart_size >> 20),
952 		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
953 
954 	adev->gart.ready = true;
955 
956 	return 0;
957 }
958 
959 static int gmc_v10_0_hw_init(void *handle)
960 {
961 	int r;
962 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
963 
964 	/* The sequence of these two function calls matters.*/
965 	gmc_v10_0_init_golden_registers(adev);
966 
967 	r = gmc_v10_0_gart_enable(adev);
968 	if (r)
969 		return r;
970 
971 	return 0;
972 }
973 
974 /**
975  * gmc_v10_0_gart_disable - gart disable
976  *
977  * @adev: amdgpu_device pointer
978  *
979  * This disables all VM page table.
980  */
981 static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
982 {
983 	gfxhub_v2_0_gart_disable(adev);
984 	mmhub_v2_0_gart_disable(adev);
985 	amdgpu_gart_table_vram_unpin(adev);
986 }
987 
988 static int gmc_v10_0_hw_fini(void *handle)
989 {
990 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
991 
992 	if (amdgpu_sriov_vf(adev)) {
993 		/* full access mode, so don't touch any GMC register */
994 		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
995 		return 0;
996 	}
997 
998 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
999 	gmc_v10_0_gart_disable(adev);
1000 
1001 	return 0;
1002 }
1003 
1004 static int gmc_v10_0_suspend(void *handle)
1005 {
1006 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1007 
1008 	gmc_v10_0_hw_fini(adev);
1009 
1010 	return 0;
1011 }
1012 
1013 static int gmc_v10_0_resume(void *handle)
1014 {
1015 	int r;
1016 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1017 
1018 	r = gmc_v10_0_hw_init(adev);
1019 	if (r)
1020 		return r;
1021 
1022 	amdgpu_vmid_reset_all(adev);
1023 
1024 	return 0;
1025 }
1026 
1027 static bool gmc_v10_0_is_idle(void *handle)
1028 {
1029 	/* MC is always ready in GMC v10.*/
1030 	return true;
1031 }
1032 
1033 static int gmc_v10_0_wait_for_idle(void *handle)
1034 {
1035 	/* There is no need to wait for MC idle in GMC v10.*/
1036 	return 0;
1037 }
1038 
1039 static int gmc_v10_0_soft_reset(void *handle)
1040 {
1041 	return 0;
1042 }
1043 
1044 static int gmc_v10_0_set_clockgating_state(void *handle,
1045 					   enum amd_clockgating_state state)
1046 {
1047 	int r;
1048 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1049 
1050 	r = mmhub_v2_0_set_clockgating(adev, state);
1051 	if (r)
1052 		return r;
1053 
1054 	return athub_v2_0_set_clockgating(adev, state);
1055 }
1056 
1057 static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
1058 {
1059 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1060 
1061 	mmhub_v2_0_get_clockgating(adev, flags);
1062 
1063 	athub_v2_0_get_clockgating(adev, flags);
1064 }
1065 
1066 static int gmc_v10_0_set_powergating_state(void *handle,
1067 					   enum amd_powergating_state state)
1068 {
1069 	return 0;
1070 }
1071 
1072 const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
1073 	.name = "gmc_v10_0",
1074 	.early_init = gmc_v10_0_early_init,
1075 	.late_init = gmc_v10_0_late_init,
1076 	.sw_init = gmc_v10_0_sw_init,
1077 	.sw_fini = gmc_v10_0_sw_fini,
1078 	.hw_init = gmc_v10_0_hw_init,
1079 	.hw_fini = gmc_v10_0_hw_fini,
1080 	.suspend = gmc_v10_0_suspend,
1081 	.resume = gmc_v10_0_resume,
1082 	.is_idle = gmc_v10_0_is_idle,
1083 	.wait_for_idle = gmc_v10_0_wait_for_idle,
1084 	.soft_reset = gmc_v10_0_soft_reset,
1085 	.set_clockgating_state = gmc_v10_0_set_clockgating_state,
1086 	.set_powergating_state = gmc_v10_0_set_powergating_state,
1087 	.get_clockgating_state = gmc_v10_0_get_clockgating_state,
1088 };
1089 
1090 const struct amdgpu_ip_block_version gmc_v10_0_ip_block =
1091 {
1092 	.type = AMD_IP_BLOCK_TYPE_GMC,
1093 	.major = 10,
1094 	.minor = 0,
1095 	.rev = 0,
1096 	.funcs = &gmc_v10_0_ip_funcs,
1097 };
1098