/* $NetBSD: amdgpu_ib.c,v 1.3 2021/12/18 23:44:58 riastradh Exp $ */

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_ib.c,v 1.3 2021/12/18 23:44:58 riastradh Exp $");

#include <linux/seq_file.h>
#include <linux/slab.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_trace.h"

#define AMDGPU_IB_TEST_TIMEOUT	msecs_to_jiffies(1000)
#define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT	msecs_to_jiffies(2000)

/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU-accessible memory where
 * commands are stored.  You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them.  Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
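
/*
 * A minimal sketch of that life cycle, for orientation only; error
 * handling is abbreviated and the surrounding device/ring setup
 * (normally done by the command submission path) is assumed to
 * already exist:
 *
 *	struct amdgpu_ib ib;
 *	struct dma_fence *f = NULL;
 *	int r;
 *
 *	memset(&ib, 0, sizeof(ib));
 *	r = amdgpu_ib_get(adev, NULL, 256, &ib);  // carve 256 bytes
 *	if (r)                                    // from the SA pool
 *		return r;
 *	ib.ptr[0] = ...;                          // fill in packets
 *	ib.length_dw = ...;
 *	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);  // no job, as in
 *							 // the ring tests
 *	amdgpu_ib_free(adev, &ib, f);             // reused only after
 *	dma_fence_put(f);                         // f signals
 */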
static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm the IB will be used with, NULL for none
 * @size: requested IB size
 * @ib: IB object returned
 *
 * Request an IB (all asics).  IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib)
{
	int r;

	if (size) {
		r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
				     &ib->sa_bo, size, 256);
		if (r) {
			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
			return r;
		}

		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);

		if (!vm)
			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
	}

	return 0;
}

/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @ib: IB object to free
 * @f: the fence the suballocated memory must wait on before being reused
 *
 * Free an IB (all asics).
 */
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f)
{
	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}

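/*
 * Passing f == NULL hands the memory straight back to the suballocator;
 * callers that actually submitted the IB should pass the scheduling
 * fence instead, so the memory is only recycled once the GPU is done
 * with it (sketch, error paths omitted):
 *
 *	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 *	amdgpu_ib_free(adev, &ib, f);
 *	dma_fence_put(f);
 */
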
/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @ring: ring the IBs are scheduled on
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @job: job the IBs belong to, NULL for ring tests
 * @f: fence created during this submission
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed.  To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
 * to SI there was just a DE IB.
 */
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib *ib = &ibs[0];
	struct dma_fence *tmp = NULL;
	bool skip_preamble, need_ctx_switch;
	unsigned patch_offset = ~0;
	struct amdgpu_vm *vm;
	uint64_t fence_ctx;
	uint32_t status = 0, alloc_size;
	unsigned fence_flags = 0;

	unsigned i;
	int r = 0;
	bool need_pipe_sync = false;

	if (num_ibs == 0)
		return -EINVAL;

	/* ring tests don't use a job */
	if (job) {
		vm = job->vm;
		fence_ctx = job->base.s_fence ?
			job->base.s_fence->scheduled.context : 0;
	} else {
		vm = NULL;
		fence_ctx = 0;
	}

	if (!ring->sched.ready) {
		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
		return -EINVAL;
	}

	if (vm && !job->vmid) {
		dev_err(adev->dev, "VM IB without ID\n");
		return -EINVAL;
	}

	alloc_size = ring->funcs->emit_frame_size + num_ibs *
		ring->funcs->emit_ib_size;

	r = amdgpu_ring_alloc(ring, alloc_size);
	if (r) {
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}

	need_ctx_switch = ring->current_ctx != fence_ctx;
	if (ring->funcs->emit_pipeline_sync && job &&
	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
	     (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
	     amdgpu_vm_need_pipeline_sync(ring, job))) {
		need_pipe_sync = true;

		if (tmp)
			trace_amdgpu_ib_pipe_sync(job, tmp);

		dma_fence_put(tmp);
	}

	if (ring->funcs->insert_start)
		ring->funcs->insert_start(ring);

	if (job) {
		r = amdgpu_vm_flush(ring, job, need_pipe_sync);
		if (r) {
			amdgpu_ring_undo(ring);
			return r;
		}
	}

	if (job && ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

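	/*
	 * Flush the HDP cache so command data written by the CPU is
	 * visible to the GPU before it starts fetching; x86-64 APUs
	 * access memory coherently and skip this.
	 */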
#ifdef CONFIG_X86_64
	if (!(adev->flags & AMD_IS_APU))
#endif
	{
		if (ring->funcs->emit_hdp_flush)
			amdgpu_ring_emit_hdp_flush(ring);
		else
			amdgpu_asic_flush_hdp(adev, ring);
	}

	if (need_ctx_switch)
		status |= AMDGPU_HAVE_CTX_SWITCH;

	skip_preamble = ring->current_ctx == fence_ctx;
	if (job && ring->funcs->emit_cntxcntl) {
		status |= job->preamble_status;
		status |= job->preemption_status;
		amdgpu_ring_emit_cntxcntl(ring, status);
	}

	for (i = 0; i < num_ibs; ++i) {
		ib = &ibs[i];

		/* drop preamble IBs if we don't have a context switch */
		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
		    skip_preamble &&
		    !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
		    !amdgpu_mcbp &&
		    !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
			continue;

		amdgpu_ring_emit_ib(ring, job, ib, status);
		status &= ~AMDGPU_HAVE_CTX_SWITCH;
	}

	if (ring->funcs->emit_tmz)
		amdgpu_ring_emit_tmz(ring, false);

#ifdef CONFIG_X86_64
	if (!(adev->flags & AMD_IS_APU))
#endif
		amdgpu_asic_invalidate_hdp(adev, ring);

	if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
		fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;

	/* wrap the last IB with fence */
	if (job && job->uf_addr) {
		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
	}

	r = amdgpu_fence_emit(ring, f, fence_flags);
	if (r) {
		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
		if (job && job->vmid)
			amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
		amdgpu_ring_undo(ring);
		return r;
	}

	if (ring->funcs->insert_end)
		ring->funcs->insert_end(ring);

	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	ring->current_ctx = fence_ctx;
	if (vm && ring->funcs->emit_switch_buffer)
		amdgpu_ring_emit_switch_buffer(ring);
	amdgpu_ring_commit(ring);
	return 0;
}
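
/*
 * For the SI CE/DE split described above, the kernel receives two IBs
 * per submission.  Schematically (a sketch, not the actual command
 * submission code; "ce_ib" and "de_ib" are hypothetical placeholders):
 *
 *	struct amdgpu_ib ibs[2] = { ce_ib, de_ib };
 *
 *	ibs[0].flags |= AMDGPU_IB_FLAG_CE;	// CONST_IB, goes first
 *	r = amdgpu_ib_schedule(ring, 2, ibs, job, &f);
 *
 * Preamble CE IBs additionally carry AMDGPU_IB_FLAG_PREAMBLE, which the
 * loop above may drop when the context did not switch.
 */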

/**
 * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->ib_pool_ready) {
		return 0;
	}
	r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
				      AMDGPU_IB_POOL_SIZE*64*1024,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		return r;
	}

	adev->ib_pool_ready = true;
	if (amdgpu_debugfs_sa_init(adev)) {
		dev_err(adev->dev, "failed to register debugfs file for SA\n");
	}
	return 0;
}

/**
 * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
	if (adev->ib_pool_ready) {
		amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
		adev->ib_pool_ready = false;
	}
}
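
/*
 * A sketch of how the two pool functions pair up in the device
 * bring-up/teardown path (simplified; the real callers live elsewhere
 * in the driver):
 *
 *	r = amdgpu_ib_pool_init(adev);	// one GTT allocation, handed out
 *	if (r)				// in 256-byte-aligned pieces by
 *		return r;		// amdgpu_ib_get()
 *	...
 *	amdgpu_ib_pool_fini(adev);	// safe to call unconditionally:
 *					// it checks ib_pool_ready
 */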

/**
 * amdgpu_ib_ring_tests - test IBs on the rings
 *
 * @adev: amdgpu_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
	unsigned i;
	int r, ret = 0;
	long tmo_gfx, tmo_mm;

	tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
	if (amdgpu_sriov_vf(adev)) {
		/* On the hypervisor side, the MM engines are not scheduled
		 * together with the CP and SDMA engines, so even in exclusive
		 * mode an MM engine may still be busy serving another VF.
		 * The IB test timeout for MM engines under SR-IOV therefore
		 * has to be long; 8 seconds should be enough for the MM
		 * engines to come back to this VF.
		 */
		tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
	}

	if (amdgpu_sriov_runtime(adev)) {
		/* The CP and SDMA engines are scheduled together, so under
		 * RUNTIME mode the timeout needs to be wide enough to cover
		 * the time spent waiting for them to come back.
		 */
		tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
	} else if (adev->gmc.xgmi.hive_id) {
		tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
	}

	for (i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		long tmo;

		/* KIQ rings don't have an IB test because we never submit IBs
		 * to them and they have no interrupt support.
		 */
		if (!ring->sched.ready || !ring->funcs->test_ib)
			continue;

		/* MM engines need more time */
		if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
		    ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			tmo = tmo_mm;
		else
			tmo = tmo_gfx;

		r = amdgpu_ring_test_ib(ring, tmo);
		if (!r) {
			DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
				      ring->name);
			continue;
		}

		ring->sched.ready = false;
		DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d).\n",
			      ring->name, r);

		if (ring == &adev->gfx.gfx_ring[0]) {
			/* oh, oh, that's really bad */
			adev->accel_working = false;
			return r;

		} else {
			ret = r;
		}
	}
	return ret;
}
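
/*
 * Callers typically run these tests once at init and again after
 * resume or reset; a sketch, assuming the usual late-init context:
 *
 *	r = amdgpu_ib_ring_tests(adev);
 *	if (r)
 *		DRM_ERROR("ib ring test failed (%d).\n", r);
 *
 * A failure on the primary GFX ring is fatal (accel_working is cleared
 * above), while other rings are individually disabled instead.
 */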

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
	{"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
};

#endif

static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
#else
	return 0;
#endif
}