/*	$NetBSD: amdgpu_ring.c,v 1.7 2021/12/19 12:31:45 riastradh Exp $	*/

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_ring.c,v 1.7 2021/12/19 12:31:45 riastradh Exp $");

#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

#include <linux/nbsd-namespace.h>

/*
 * Rings
 * Most engines on the GPU are fed via ring buffers.  Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written.  When the
 * pointers are equal, the ring is idle.  When the host
 * writes commands to the ring buffer, it increments the
 * wptr.  The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
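
/*
 * Illustrative sketch (not part of the driver): with the power-of-two
 * ring size set up in amdgpu_ring_init() below, the free space between
 * the two pointers follows from this arithmetic.  The helper name is
 * hypothetical; it assumes only the wptr/buf_mask fields and the
 * amdgpu_ring_get_rptr() accessor used elsewhere in this file.
 */
static inline uint32_t
amdgpu_ring_example_free_dw(struct amdgpu_ring *ring)
{
	uint64_t rptr = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
	uint64_t wptr = ring->wptr & ring->buf_mask;

	/* rptr == wptr means idle: all but one guard slot is free */
	return (uint32_t)((rptr - wptr - 1) & ring->buf_mask);
}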

static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
				    struct amdgpu_ring *ring);
static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);

/**
 * amdgpu_ring_alloc - allocate space on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
	/* Align requested size with padding so amdgpu_ring_commit can
	 * pad safely */
	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

	/* Make sure we aren't trying to allocate more space
	 * than the maximum for one submission
	 */
	if (WARN_ON_ONCE(ndw > ring->max_dw))
		return -ENOMEM;

	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;

	if (ring->funcs->begin_use)
		ring->funcs->begin_use(ring);

	return 0;
}
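
/*
 * Example usage (an illustrative sketch, not called by the driver):
 * reserve space, emit dwords with amdgpu_ring_write(), then publish
 * them with amdgpu_ring_commit(); a caller that fails after a
 * successful allocation should roll back with amdgpu_ring_undo()
 * instead.  The function name is hypothetical.
 */
static inline int
amdgpu_ring_example_emit_nops(struct amdgpu_ring *ring, unsigned ndw)
{
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, ndw);
	if (r)
		return r;

	/* emit the payload; here just NOP packets */
	for (i = 0; i < ndw; i++)
		amdgpu_ring_write(ring, ring->funcs->nop);

	amdgpu_ring_commit(ring);
	return 0;
}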

/**
 * amdgpu_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @count: the number of NOP packets to insert
 *
 * This is the generic insert_nop function for rings except SDMA
 */
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	for (i = 0; i < count; i++)
		amdgpu_ring_write(ring, ring->funcs->nop);
}

/**
 * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: IB to add NOP packets to
 *
 * This is the generic pad_ib function for rings except SDMA
 */
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & ring->funcs->align_mask)
		ib->ptr[ib->length_dw++] = ring->funcs->nop;
}

/**
 * amdgpu_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
	uint32_t count;

	/* We pad to match fetch size */
	count = ring->funcs->align_mask + 1 -
		(ring->wptr & ring->funcs->align_mask);
	count %= ring->funcs->align_mask + 1;
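	/*
	 * e.g. an 8-dword fetch size (align_mask == 7) with wptr % 8 == 5
	 * gives count = (8 - 5) % 8 = 3 NOPs, while an already aligned
	 * wptr gives count = 0.
	 */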
	ring->funcs->insert_nop(ring, count);

	mb();
	amdgpu_ring_set_wptr(ring);

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}

/**
 * amdgpu_ring_undo - reset the wptr
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
	ring->wptr = ring->wptr_old;

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}

/**
 * amdgpu_ring_priority_put - restore a ring's priority
 *
 * @ring: amdgpu_ring structure holding the information
 * @priority: target priority
 *
 * Release a request for executing at @priority
 */
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
			      enum drm_sched_priority priority)
{
	int i;

	if (!ring->funcs->set_priority)
		return;

	if (atomic_dec_return(&ring->num_jobs[priority]) > 0)
		return;

	/* no need to restore if the job is already at the lowest priority */
	if (priority == DRM_SCHED_PRIORITY_NORMAL)
		return;

	mutex_lock(&ring->priority_mutex);
	/* something higher prio is executing, no need to decay */
	if (ring->priority > priority)
		goto out_unlock;

	/* decay priority to the next level with a job available */
	for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		if (i == DRM_SCHED_PRIORITY_NORMAL
		    || atomic_read(&ring->num_jobs[i])) {
			ring->priority = i;
			ring->funcs->set_priority(ring, i);
			break;
		}
	}

out_unlock:
	mutex_unlock(&ring->priority_mutex);
}

/**
 * amdgpu_ring_priority_get - change the ring's priority
 *
 * @ring: amdgpu_ring structure holding the information
 * @priority: target priority
 *
 * Request a ring's priority to be raised to @priority (refcounted).
 */
void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
			      enum drm_sched_priority priority)
{
	if (!ring->funcs->set_priority)
		return;

	if (atomic_inc_return(&ring->num_jobs[priority]) <= 0)
		return;

	mutex_lock(&ring->priority_mutex);
	if (priority <= ring->priority)
		goto out_unlock;

	ring->priority = priority;
	ring->funcs->set_priority(ring, priority);

out_unlock:
	mutex_unlock(&ring->priority_mutex);
}

/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @max_dw: maximum number of dwords that may be allocated per submission
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned irq_type)
{
	int r, i;
	int sched_hw_submission = amdgpu_sched_hw_submission;

	/* Set the hw submission limit higher for KIQ because
	 * it's used for a number of gfx/compute tasks by both
	 * KFD and KGD which may have outstanding fences and
	 * it doesn't really use the gpu scheduler anyway;
	 * KIQ tasks get submitted directly to the ring.
	 */
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		sched_hw_submission = max(sched_hw_submission, 256);
	else if (ring == &adev->sdma.instance[0].page)
		sched_hw_submission = 256;

	if (ring->adev == NULL) {
		if (adev->num_rings >= AMDGPU_MAX_RINGS)
			return -EINVAL;

		ring->adev = adev;
		ring->idx = adev->num_rings++;
		adev->rings[ring->idx] = ring;
		r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission);
		if (r)
			return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->fence_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) ring trail_fence_offs wb alloc failed\n", r);
		return r;
	}
	ring->trail_fence_gpu_addr =
		adev->wb.gpu_addr + (ring->trail_fence_offs * 4);
	ring->trail_fence_cpu_addr = &adev->wb.wb[ring->trail_fence_offs];

	r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
		return r;
	}
	ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
	ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];
	/* always set cond_exec_polling to CONTINUE */
	*ring->cond_exe_cpu_addr = 1;

	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
	if (r) {
		dev_err(adev->dev, "failed initializing fences (%d).\n", r);
		return r;
	}

	ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);

	ring->buf_mask = (ring->ring_size / 4) - 1;
	ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
		0xffffffffffffffff : ring->buf_mask;
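	/*
	 * e.g. max_dw = 1024 with sched_hw_submission = 2 rounds up to an
	 * 8 KiB ring (2048 dwords), giving buf_mask = 0x7ff; rings with
	 * 64-bit pointer support effectively leave wptr/rptr unmasked.
	 */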
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_GTT,
					    &ring->ring_obj,
					    &ring->gpu_addr,
					    (void **)__UNVOLATILE(&ring->ring));
		if (r) {
			dev_err(adev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		amdgpu_ring_clear_ring(ring);
	}

	ring->max_dw = max_dw;
	ring->priority = DRM_SCHED_PRIORITY_NORMAL;
	mutex_init(&ring->priority_mutex);

	for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
		atomic_set(&ring->num_jobs[i], 0);

	if (amdgpu_debugfs_ring_init(adev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings !\n");
	}

	return 0;
}

/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
	ring->sched.ready = false;

	/* Don't tear down a ring that was never initialized */
	if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
		return;

	amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
	amdgpu_device_wb_free(ring->adev, ring->wptr_offs);

	amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
	amdgpu_device_wb_free(ring->adev, ring->fence_offs);

	amdgpu_bo_free_kernel(&ring->ring_obj,
			      &ring->gpu_addr,
			      (void **)__UNVOLATILE(&ring->ring));

	amdgpu_debugfs_ring_fini(ring);

	dma_fence_put(ring->vmid_wait);
	ring->vmid_wait = NULL;
	ring->me = 0;

	ring->adev->rings[ring->idx] = NULL;

	mutex_destroy(&ring->priority_mutex);
}

/**
 * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper
 *
 * @ring: ring to emit on
 * @reg0: register to write
 * @reg1: register to wait on
 * @ref: reference value to write/wait on
 * @mask: mask to wait on
 *
 * Helper for rings that don't support write and wait in a
 * single oneshot packet.
 */
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t reg1,
						uint32_t ref, uint32_t mask)
{
	amdgpu_ring_emit_wreg(ring, reg0, ref);
	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}

/**
 * amdgpu_ring_soft_recovery - try to soft recover a ring lockup
 *
 * @ring: ring to try the recovery on
 * @vmid: VMID we try to get going again
 * @fence: timedout fence
 *
 * Tries to get a ring proceeding again when it is stuck.
 */
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence)
{
	ktime_t deadline = ktime_add_us(ktime_get(), 10000);

	if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
		return false;

	atomic_inc(&ring->adev->gpu_reset_counter);
	while (!dma_fence_is_signaled(fence) &&
	       ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
		ring->funcs->soft_recovery(ring, vmid);

	return dma_fence_is_signaled(fence);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

/* The file layout is a 12-byte header consisting of
 * - rptr
 * - wptr
 * - driver's copy of wptr
 *
 * followed by n dwords of ring data
 */
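/* For example, a 4-byte read at *pos == 4 returns the hardware wptr,
 * and the dword at byte offset 12 + 4*i is ring->ring[i].
 */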
static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_ring *ring = file_inode(f)->i_private;
	int r, i;
	uint32_t value, result, early[3];

	if (*pos & 3 || size & 3)
		return -EINVAL;

	result = 0;

	if (*pos < 12) {
		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
		early[2] = ring->wptr & ring->buf_mask;
		for (i = *pos / 4; i < 3 && size; i++) {
			r = put_user(early[i], (uint32_t *)buf);
			if (r)
				return r;
			buf += 4;
			result += 4;
			size -= 4;
			*pos += 4;
		}
	}

	while (size) {
		if (*pos >= (ring->ring_size + 12))
			return result;

		value = ring->ring[(*pos - 12)/4];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;
		buf += 4;
		result += 4;
		size -= 4;
		*pos += 4;
	}

	return result;
}

static const struct file_operations amdgpu_debugfs_ring_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_ring_read,
	.llseek = default_llseek
};

#endif

static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
				    struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	char name[32];

	sprintf(name, "amdgpu_ring_%s", ring->name);

	ent = debugfs_create_file(name,
				  S_IFREG | S_IRUGO, root,
				  ring, &amdgpu_debugfs_ring_fops);
	if (!ent)
		return -ENOMEM;

	i_size_write(ent->d_inode, ring->ring_size + 12);
	ring->ent = ent;
#endif
	return 0;
}

static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	debugfs_remove(ring->ent);
#endif
}

/**
 * amdgpu_ring_test_helper - test a ring and set its scheduler readiness
 *
 * @ring: ring to test
 *
 * Tests the ring and sets the scheduler's ready flag accordingly.
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	r = amdgpu_ring_test_ring(ring);
	if (r)
		DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
			      ring->name, r);
	else
		DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n",
			      ring->name);

	ring->sched.ready = !r;
	return r;
}