/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

#define AMDGPU_IB_TEST_TIMEOUT	msecs_to_jiffies(1000)

/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU-accessible memory where
 * commands are stored.  You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them.  Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */

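/*
 * A minimal sketch of the typical IB lifecycle, in the style of the
 * ring IB tests below (variable names and packet contents are
 * illustrative only, not taken from any particular caller):
 *
 *	struct amdgpu_ib ib;
 *	struct dma_fence *f = NULL;
 *	int r;
 *
 *	memset(&ib, 0, sizeof(ib));
 *	r = amdgpu_ib_get(adev, NULL, 256, &ib);
 *	if (r)
 *		return r;
 *	ib.ptr[0] = ...;	// build packets through the CPU mapping
 *	ib.length_dw = ...;
 *	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 *	// wait on f, then release both the fence and the suballocation
 *	amdgpu_ib_free(adev, &ib, f);
 *	dma_fence_put(f);
 */
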
static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @vm: optional VM the IB belongs to; when set, ib->gpu_addr is not
 *      taken from the suballocation
 * @size: requested IB size in bytes
 * @ib: IB object returned
 *
 * Request an IB (all asics).  IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib)
{
	int r;

	if (size) {
		r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
				      &ib->sa_bo, size, 256);
		if (r) {
			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
			return r;
		}

		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);

		if (!vm)
			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
	}

	return 0;
}

/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @ib: IB object to free
 * @f: the fence the SA bo must wait on before the IB's memory is reused
 *
 * Free an IB (all asics).
 */
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f)
{
	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}

/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @ring: ring the IBs are scheduled on
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @job: job the IBs belong to, or NULL for direct submissions (ring tests)
 * @f: fence created during this submission
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will already be in cache when the draw is
 * processed.  To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
 * to SI there was just a DE IB.
 */
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib *ib = &ibs[0];
	struct dma_fence *tmp = NULL;
	bool skip_preamble, need_ctx_switch;
	unsigned patch_offset = ~0;
	struct amdgpu_vm *vm;
	uint64_t fence_ctx;
	uint32_t status = 0, alloc_size;
	unsigned fence_flags = 0;

	unsigned i;
	int r = 0;
	bool need_pipe_sync = false;

	if (num_ibs == 0)
		return -EINVAL;

	/* ring tests don't use a job */
	if (job) {
		vm = job->vm;
		fence_ctx = job->base.s_fence ?
			job->base.s_fence->scheduled.context : 0;
	} else {
		vm = NULL;
		fence_ctx = 0;
	}

	if (!ring->ready) {
		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
		return -EINVAL;
	}

	if (vm && !job->vmid) {
		dev_err(adev->dev, "VM IB without ID\n");
		return -EINVAL;
	}

	alloc_size = ring->funcs->emit_frame_size + num_ibs *
		ring->funcs->emit_ib_size;

	r = amdgpu_ring_alloc(ring, alloc_size);
	if (r) {
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}

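	/*
	 * A pipeline sync is required before the VM flush when the
	 * scheduler handed us an explicit dependency fence for this job,
	 * when an SR-IOV VF is switching fence contexts, or when the VM
	 * code reports that pending page-table updates demand one.
	 */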
	need_ctx_switch = ring->current_ctx != fence_ctx;
	if (ring->funcs->emit_pipeline_sync && job &&
	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
	     (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
	     amdgpu_vm_need_pipeline_sync(ring, job))) {
		need_pipe_sync = true;
		dma_fence_put(tmp);
	}

	if (ring->funcs->insert_start)
		ring->funcs->insert_start(ring);

	if (job) {
		r = amdgpu_vm_flush(ring, job, need_pipe_sync);
		if (r) {
			amdgpu_ring_undo(ring);
			return r;
		}
	}

	if (job && ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

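	/*
	 * On dGPUs, flush the HDP write queue so command data the CPU
	 * wrote through the PCI BAR is visible to the GPU before the IBs
	 * execute; APUs share system memory with the CPU and can skip it.
	 */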
#ifdef CONFIG_X86_64
	if (!(adev->flags & AMD_IS_APU))
#endif
	{
		if (ring->funcs->emit_hdp_flush)
			amdgpu_ring_emit_hdp_flush(ring);
		else
			amdgpu_asic_flush_hdp(adev, ring);
	}

	skip_preamble = ring->current_ctx == fence_ctx;
	if (job && ring->funcs->emit_cntxcntl) {
		if (need_ctx_switch)
			status |= AMDGPU_HAVE_CTX_SWITCH;
		status |= job->preamble_status;

		amdgpu_ring_emit_cntxcntl(ring, status);
	}

	for (i = 0; i < num_ibs; ++i) {
		ib = &ibs[i];

		/* drop preamble IBs if we don't have a context switch */
		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
			skip_preamble &&
			!(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
			!amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
			continue;

		amdgpu_ring_emit_ib(ring, ib, job ? job->vmid : 0,
				    need_ctx_switch);
		need_ctx_switch = false;
	}

	if (ring->funcs->emit_tmz)
		amdgpu_ring_emit_tmz(ring, false);

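	/*
	 * Invalidate the HDP read cache so that CPU reads issued after
	 * this submission observe what the GPU wrote; again unnecessary
	 * on APUs.
	 */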
#ifdef CONFIG_X86_64
	if (!(adev->flags & AMD_IS_APU))
#endif
		amdgpu_asic_invalidate_hdp(adev, ring);

	if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
		fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;

	/* wrap the last IB with fence */
	if (job && job->uf_addr) {
		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
	}

	r = amdgpu_fence_emit(ring, f, fence_flags);
	if (r) {
		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
		if (job && job->vmid)
			amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
		amdgpu_ring_undo(ring);
		return r;
	}

	if (ring->funcs->insert_end)
		ring->funcs->insert_end(ring);

	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	ring->current_ctx = fence_ctx;
	if (vm && ring->funcs->emit_switch_buffer)
		amdgpu_ring_emit_switch_buffer(ring);
	amdgpu_ring_commit(ring);
	return 0;
}

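/*
 * For reference, a sketch of the two-IB CE/DE submission described in
 * the comment above (illustrative only; AMDGPU_IB_FLAG_CE is the flag
 * userspace sets on the CONST_IB, and array order is what puts the CE
 * IB on the ring first):
 *
 *	struct amdgpu_ib ibs[2];
 *
 *	ibs[0].flags |= AMDGPU_IB_FLAG_CE;	// CONST_IB for the CE
 *	ibs[1].flags = 0;			// regular DE IB
 *	r = amdgpu_ib_schedule(ring, 2, ibs, job, &f);
 */
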
/**
 * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->ib_pool_ready) {
		return 0;
	}
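	/*
	 * The pool is AMDGPU_IB_POOL_SIZE * 64KB of GTT memory,
	 * suballocated with GPU page alignment.
	 */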
	r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
				      AMDGPU_IB_POOL_SIZE*64*1024,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		return r;
	}

	adev->ib_pool_ready = true;
	if (amdgpu_debugfs_sa_init(adev)) {
		dev_err(adev->dev, "failed to register debugfs file for SA\n");
	}
	return 0;
}

/**
 * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
	if (adev->ib_pool_ready) {
		amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
		adev->ib_pool_ready = false;
	}
}

/**
 * amdgpu_ib_ring_tests - test IBs on the rings
 *
 * @adev: amdgpu_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
	unsigned i;
	int r, ret = 0;
	long tmo_gfx, tmo_mm;

	tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
	if (amdgpu_sriov_vf(adev)) {
		/* On the hypervisor side the MM engines are not scheduled
		 * together with the CP and SDMA engines, so even in exclusive
		 * mode an MM engine could still be running on another VF;
		 * the IB test timeout for MM engines under SR-IOV therefore
		 * needs to be long.  8 seconds should be enough for the MM
		 * engine to come back to this VF.
		 */
		tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
	}

	if (amdgpu_sriov_runtime(adev)) {
		/* The CP and SDMA engines are scheduled together, so the
		 * timeout needs to be wide enough to cover the time spent
		 * waiting for them to come back under RUNTIME only.
		 */
		tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
	}

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		long tmo;

		if (!ring || !ring->ready)
			continue;

		/* MM engines need more time */
		if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
			ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
			ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
			ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
			ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
			ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			tmo = tmo_mm;
		else
			tmo = tmo_gfx;

		r = amdgpu_ring_test_ib(ring, tmo);
		if (r) {
			ring->ready = false;

			if (ring == &adev->gfx.gfx_ring[0]) {
				/* oh, oh, that's really bad */
				DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r);
				adev->accel_working = false;
				return r;

			} else {
				/* still not good, but we can live with it */
				DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
				ret = r;
			}
		}
	}
	return ret;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
	{"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
};

#endif

static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
#else
	return 0;
#endif
}
415