xref: /openbsd-src/sys/dev/pci/drm/amd/amdgpu/amdgpu_ring_mux.c (revision f005ef32267c16bdb134f0e9fa4477dbe07c263a)
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/slab.h>
#include <drm/drm_print.h>

#include "amdgpu_ring_mux.h"
#include "amdgpu_ring.h"
#include "amdgpu.h"

#define AMDGPU_MUX_RESUBMIT_JIFFIES_TIMEOUT (HZ / 2)
#define AMDGPU_MAX_LAST_UNSIGNALED_THRESHOLD_US 10000

static const struct ring_info {
	unsigned int hw_pio;
	const char *ring_name;
} sw_ring_info[] = {
	{ AMDGPU_RING_PRIO_DEFAULT, "gfx_low"},
	{ AMDGPU_RING_PRIO_2, "gfx_high"},
};

static struct pool amdgpu_mux_chunk_slab;

static inline struct amdgpu_mux_entry *amdgpu_ring_mux_sw_entry(struct amdgpu_ring_mux *mux,
								struct amdgpu_ring *ring)
{
	return ring->entry_index < mux->ring_entry_size ?
			&mux->ring_entry[ring->entry_index] : NULL;
}

/* copy packets from the sw ring over the range [begin, end) */
static void amdgpu_ring_mux_copy_pkt_from_sw_ring(struct amdgpu_ring_mux *mux,
						  struct amdgpu_ring *ring,
						  u64 s_start, u64 s_end)
{
	u64 start, end;
	struct amdgpu_ring *real_ring = mux->real_ring;

	start = s_start & ring->buf_mask;
	end = s_end & ring->buf_mask;

	if (start == end) {
		DRM_ERROR("no more data copied from sw ring\n");
		return;
	}
	if (start > end) {
		amdgpu_ring_alloc(real_ring, (ring->ring_size >> 2) + end - start);
		amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[start],
					   (ring->ring_size >> 2) - start);
		amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[0], end);
	} else {
		amdgpu_ring_alloc(real_ring, end - start);
		amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[start], end - start);
	}
}
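
/*
 * Worked example (illustrative values, not taken from real hardware):
 * assume a sw ring of 0x1000 dwords, so buf_mask = 0xfff.  For
 * s_start = 0xff0 and s_end = 0x1010:
 *
 *	start = 0xff0  & 0xfff = 0xff0
 *	end   = 0x1010 & 0xfff = 0x010
 *
 * Since start > end, the copy wraps: 0x010 dwords are written from
 * ring[0xff0] up to the end of the buffer, then 0x010 more from ring[0],
 * filling the (ring_size >> 2) + end - start = 0x020 dwords allocated on
 * the real ring.
 */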

static void amdgpu_mux_resubmit_chunks(struct amdgpu_ring_mux *mux)
{
	struct amdgpu_mux_entry *e = NULL;
	struct amdgpu_mux_chunk *chunk;
	uint32_t seq, last_seq;
	int i;

	/* find the low priority entry */
	if (!mux->s_resubmit)
		return;

	for (i = 0; i < mux->num_ring_entries; i++) {
		if (mux->ring_entry[i].ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT) {
			e = &mux->ring_entry[i];
			break;
		}
	}

	if (!e) {
		DRM_ERROR("%s no low priority ring found\n", __func__);
		return;
	}

	last_seq = atomic_read(&e->ring->fence_drv.last_seq);
	seq = mux->seqno_to_resubmit;
	if (last_seq < seq) {
		/* resubmit all the fences in the range (last_seq, seq] */
		list_for_each_entry(chunk, &e->list, entry) {
			if (chunk->sync_seq > last_seq && chunk->sync_seq <= seq) {
				amdgpu_fence_update_start_timestamp(e->ring,
								    chunk->sync_seq,
								    ktime_get());
				if (chunk->sync_seq ==
					le32_to_cpu(*(e->ring->fence_drv.cpu_addr + 2))) {
					if (chunk->cntl_offset <= e->ring->buf_mask)
						amdgpu_ring_patch_cntl(e->ring,
								       chunk->cntl_offset);
					if (chunk->ce_offset <= e->ring->buf_mask)
						amdgpu_ring_patch_ce(e->ring, chunk->ce_offset);
					if (chunk->de_offset <= e->ring->buf_mask)
						amdgpu_ring_patch_de(e->ring, chunk->de_offset);
				}
				amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, e->ring,
								      chunk->start,
								      chunk->end);
				mux->wptr_resubmit = chunk->end;
				amdgpu_ring_commit(mux->real_ring);
			}
		}
	}

	del_timer(&mux->resubmit_timer);
	mux->s_resubmit = false;
}
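
/*
 * Resubmit window example (hypothetical sequence numbers): if the low
 * priority ring last signaled fence 8 (last_seq = 8) and
 * mux->seqno_to_resubmit is 11, only the chunks with sync_seq 9, 10 and
 * 11 are copied back onto the real ring; chunks at or below 8 have
 * already completed and are skipped.
 */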

static void amdgpu_ring_mux_schedule_resubmit(struct amdgpu_ring_mux *mux)
{
	mod_timer(&mux->resubmit_timer, jiffies + AMDGPU_MUX_RESUBMIT_JIFFIES_TIMEOUT);
}

static void amdgpu_mux_resubmit_fallback(void *arg)
{
	struct amdgpu_ring_mux *mux = arg;

	if (!spin_trylock(&mux->lock)) {
		amdgpu_ring_mux_schedule_resubmit(mux);
		DRM_ERROR("reschedule resubmit\n");
		return;
	}
	amdgpu_mux_resubmit_chunks(mux);
	spin_unlock(&mux->lock);
}

int amdgpu_ring_mux_init(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring,
			 unsigned int entry_size)
{
	mux->real_ring = ring;
	mux->num_ring_entries = 0;

	mux->ring_entry = kcalloc(entry_size, sizeof(struct amdgpu_mux_entry), GFP_KERNEL);
	if (!mux->ring_entry)
		return -ENOMEM;

	mux->ring_entry_size = entry_size;
	mux->s_resubmit = false;

#ifdef __linux__
	amdgpu_mux_chunk_slab = kmem_cache_create("amdgpu_mux_chunk",
						  sizeof(struct amdgpu_mux_chunk), 0,
						  SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_mux_chunk_slab) {
		DRM_ERROR("create amdgpu_mux_chunk cache failed\n");
		return -ENOMEM;
	}
#else
	pool_init(&amdgpu_mux_chunk_slab, sizeof(struct amdgpu_mux_chunk),
	    CACHELINESIZE, IPL_TTY, 0, "amdgpu_mux_chunk", NULL);
#endif

	mtx_init(&mux->lock, IPL_NONE);
#ifdef __linux__
	timer_setup(&mux->resubmit_timer, amdgpu_mux_resubmit_fallback, 0);
#else
	timeout_set(&mux->resubmit_timer, amdgpu_mux_resubmit_fallback, mux);
#endif

	return 0;
}
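
/*
 * Minimal usage sketch (illustrative only, kept out of the build; the real
 * call site lives in the gfx IP init code and the helper name here is
 * hypothetical):
 */
#if 0
static int example_setup_gfx_muxer(struct amdgpu_device *adev)
{
	int i, r;

	/* back the muxer with the first real gfx ring, room for 2 sw rings */
	r = amdgpu_ring_mux_init(&adev->gfx.muxer, &adev->gfx.gfx_ring[0], 2);
	if (r)
		return r;

	/* register each software ring with the muxer */
	for (i = 0; i < 2; i++) {
		r = amdgpu_ring_mux_add_sw_ring(&adev->gfx.muxer,
						&adev->gfx.sw_gfx_ring[i]);
		if (r)
			return r;
	}
	return 0;
}
#endif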

void amdgpu_ring_mux_fini(struct amdgpu_ring_mux *mux)
{
	struct amdgpu_mux_entry *e;
	struct amdgpu_mux_chunk *chunk, *chunk2;
	int i;

	for (i = 0; i < mux->num_ring_entries; i++) {
		e = &mux->ring_entry[i];
		list_for_each_entry_safe(chunk, chunk2, &e->list, entry) {
			list_del(&chunk->entry);
#ifdef __linux__
			kmem_cache_free(amdgpu_mux_chunk_slab, chunk);
#else
			pool_put(&amdgpu_mux_chunk_slab, chunk);
#endif
		}
	}
#ifdef __linux__
	kmem_cache_destroy(amdgpu_mux_chunk_slab);
#else
	pool_destroy(&amdgpu_mux_chunk_slab);
#endif
	kfree(mux->ring_entry);
	mux->ring_entry = NULL;
	mux->num_ring_entries = 0;
	mux->ring_entry_size = 0;
}

int amdgpu_ring_mux_add_sw_ring(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
	struct amdgpu_mux_entry *e;

	if (mux->num_ring_entries >= mux->ring_entry_size) {
		DRM_ERROR("add sw ring exceeding max entry size\n");
		return -ENOENT;
	}

	e = &mux->ring_entry[mux->num_ring_entries];
	ring->entry_index = mux->num_ring_entries;
	e->ring = ring;

	INIT_LIST_HEAD(&e->list);
	mux->num_ring_entries += 1;
	return 0;
}

void amdgpu_ring_mux_set_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring, u64 wptr)
{
	struct amdgpu_mux_entry *e;

	spin_lock(&mux->lock);

	if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT)
		amdgpu_mux_resubmit_chunks(mux);

	e = amdgpu_ring_mux_sw_entry(mux, ring);
	if (!e) {
		DRM_ERROR("cannot find entry for sw ring\n");
		spin_unlock(&mux->lock);
		return;
	}

	/* We can skip this wptr update while a preemption is in progress. */
	if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT && mux->pending_trailing_fence_signaled) {
		spin_unlock(&mux->lock);
		return;
	}

	e->sw_cptr = e->sw_wptr;
	/* Update cptr if the packets were already copied by the resubmit path */
	if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT && e->sw_cptr < mux->wptr_resubmit)
		e->sw_cptr = mux->wptr_resubmit;
	e->sw_wptr = wptr;
	e->start_ptr_in_hw_ring = mux->real_ring->wptr;

	/* Skip copying packets that were already resubmitted. */
	if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT || mux->wptr_resubmit < wptr) {
		amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, ring, e->sw_cptr, wptr);
		e->end_ptr_in_hw_ring = mux->real_ring->wptr;
		amdgpu_ring_commit(mux->real_ring);
	} else {
		e->end_ptr_in_hw_ring = mux->real_ring->wptr;
	}
	spin_unlock(&mux->lock);
}
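
/*
 * Illustrative wptr update (hypothetical pointer values): with
 * e->sw_wptr = 0x100 from the previous call and wptr = 0x140 now,
 * sw_cptr becomes 0x100 and packets [0x100, 0x140) are copied onto the
 * real ring.  If a resubmit had already copied up to
 * wptr_resubmit = 0x120, sw_cptr is bumped to 0x120 and only
 * [0x120, 0x140) is copied.
 */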

u64 amdgpu_ring_mux_get_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
	struct amdgpu_mux_entry *e;

	e = amdgpu_ring_mux_sw_entry(mux, ring);
	if (!e) {
		DRM_ERROR("cannot find entry for sw ring\n");
		return 0;
	}

	return e->sw_wptr;
}

/**
 * amdgpu_ring_mux_get_rptr - get the readptr of the software ring
 * @mux: the multiplexer the software rings attach to
 * @ring: the software ring of which we calculate the readptr
 *
 * The returned readptr is not precise while the other rings could write
 * data onto the real ring buffer. After overwriting on the real ring, we
 * cannot decide whether our packets have been executed or not read yet.
 * However, this function is only called by tools such as umr to collect
 * the latest packets for hang analysis. We assume the hang happens near
 * our latest submit. Thus we use the following logic to give the clue:
 * If the readptr is between start and end, then we return the copy pointer
 * plus the distance from start to readptr. If the readptr is before start,
 * we return the copy pointer. Lastly, if the readptr is past end, we return
 * the write pointer.
 */
u64 amdgpu_ring_mux_get_rptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
	struct amdgpu_mux_entry *e;
	u64 readp, offset, start, end;

	e = amdgpu_ring_mux_sw_entry(mux, ring);
	if (!e) {
		DRM_ERROR("no sw entry found!\n");
		return 0;
	}

	readp = amdgpu_ring_get_rptr(mux->real_ring);

	start = e->start_ptr_in_hw_ring & mux->real_ring->buf_mask;
	end = e->end_ptr_in_hw_ring & mux->real_ring->buf_mask;
	if (start > end) {
		if (readp <= end)
			readp += mux->real_ring->ring_size >> 2;
		end += mux->real_ring->ring_size >> 2;
	}

	if (start <= readp && readp <= end) {
		offset = readp - start;
		e->sw_rptr = (e->sw_cptr + offset) & ring->buf_mask;
	} else if (readp < start) {
		e->sw_rptr = e->sw_cptr;
	} else {
		/* end < readptr */
		e->sw_rptr = e->sw_wptr;
	}

	return e->sw_rptr;
}
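
/*
 * Worked example for the mapping above (hypothetical values): suppose this
 * entry's packets occupy [start = 0x20, end = 0x60) on the real ring and
 * sw_cptr = 0x10.  A hardware rptr of 0x30 lies inside the window, so
 * offset = 0x10 and sw_rptr = (0x10 + 0x10) & buf_mask = 0x20.  A rptr of
 * 0x08 (before start) yields sw_rptr = sw_cptr = 0x10, and a rptr of 0x70
 * (past end) yields sw_rptr = sw_wptr.
 */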

u64 amdgpu_sw_ring_get_rptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ring_mux *mux = &adev->gfx.muxer;

	WARN_ON(!ring->is_sw_ring);
	return amdgpu_ring_mux_get_rptr(mux, ring);
}

u64 amdgpu_sw_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ring_mux *mux = &adev->gfx.muxer;

	WARN_ON(!ring->is_sw_ring);
	return amdgpu_ring_mux_get_wptr(mux, ring);
}

void amdgpu_sw_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ring_mux *mux = &adev->gfx.muxer;

	WARN_ON(!ring->is_sw_ring);
	amdgpu_ring_mux_set_wptr(mux, ring, ring->wptr);
}

/* Override insert_nop to prevent emitting nops to the software rings */
void amdgpu_sw_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	WARN_ON(!ring->is_sw_ring);
}

const char *amdgpu_sw_ring_name(int idx)
{
	return idx < ARRAY_SIZE(sw_ring_info) ?
		sw_ring_info[idx].ring_name : NULL;
}

unsigned int amdgpu_sw_ring_priority(int idx)
{
	return idx < ARRAY_SIZE(sw_ring_info) ?
		sw_ring_info[idx].hw_pio : AMDGPU_RING_PRIO_DEFAULT;
}

/*
 * Scan the low priority rings for a long-unsignaled fence while the high
 * priority ring has no fence outstanding.
 */
static int amdgpu_mcbp_scan(struct amdgpu_ring_mux *mux)
{
	struct amdgpu_ring *ring;
	int i, need_preempt;

	need_preempt = 0;
	for (i = 0; i < mux->num_ring_entries; i++) {
		ring = mux->ring_entry[i].ring;
		if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT &&
		    amdgpu_fence_count_emitted(ring) > 0)
			return 0;
		if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT &&
		    amdgpu_fence_last_unsignaled_time_us(ring) >
		    AMDGPU_MAX_LAST_UNSIGNALED_THRESHOLD_US)
			need_preempt = 1;
	}
	return need_preempt && !mux->s_resubmit;
}
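
/*
 * Decision example (hypothetical state): if the high priority ring still
 * has emitted but unsignaled fences, the scan returns 0 and nothing is
 * preempted.  If the high priority ring is idle and a low priority ring
 * has had a fence unsignaled for more than 10 ms
 * (AMDGPU_MAX_LAST_UNSIGNALED_THRESHOLD_US), need_preempt is set; the
 * preemption is still suppressed while a resubmit is pending
 * (mux->s_resubmit).
 */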

/* Trigger Mid-Command Buffer Preemption (MCBP) and check whether we need to resubmit. */
static int amdgpu_mcbp_trigger_preempt(struct amdgpu_ring_mux *mux)
{
	int r;

	spin_lock(&mux->lock);
	mux->pending_trailing_fence_signaled = true;
	r = amdgpu_ring_preempt_ib(mux->real_ring);
	spin_unlock(&mux->lock);
	return r;
}

void amdgpu_sw_ring_ib_begin(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ring_mux *mux = &adev->gfx.muxer;

	WARN_ON(!ring->is_sw_ring);
	if (adev->gfx.mcbp && ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) {
		if (amdgpu_mcbp_scan(mux) > 0)
			amdgpu_mcbp_trigger_preempt(mux);
		return;
	}

	amdgpu_ring_mux_start_ib(mux, ring);
}

void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ring_mux *mux = &adev->gfx.muxer;

	WARN_ON(!ring->is_sw_ring);
	if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT)
		return;
	amdgpu_ring_mux_end_ib(mux, ring);
}

void amdgpu_sw_ring_ib_mark_offset(struct amdgpu_ring *ring, enum amdgpu_ring_mux_offset_type type)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
	unsigned offset;

	if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT)
		return;

	offset = ring->wptr & ring->buf_mask;

	amdgpu_ring_mux_ib_mark_offset(mux, ring, offset, type);
}

void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
	struct amdgpu_mux_entry *e;
	struct amdgpu_mux_chunk *chunk;

	spin_lock(&mux->lock);
	amdgpu_mux_resubmit_chunks(mux);
	spin_unlock(&mux->lock);

	e = amdgpu_ring_mux_sw_entry(mux, ring);
	if (!e) {
		DRM_ERROR("cannot find entry!\n");
		return;
	}

#ifdef __linux__
	chunk = kmem_cache_alloc(amdgpu_mux_chunk_slab, GFP_KERNEL);
#else
	chunk = pool_get(&amdgpu_mux_chunk_slab, PR_WAITOK);
#endif
	if (!chunk) {
		DRM_ERROR("alloc amdgpu_mux_chunk_slab failed\n");
		return;
	}

	chunk->start = ring->wptr;
	/* Initialize to an out-of-range value to detect whether the IB submission sets them. */
	chunk->cntl_offset = ring->buf_mask + 1;
	chunk->de_offset = ring->buf_mask + 1;
	chunk->ce_offset = ring->buf_mask + 1;
	list_add_tail(&chunk->entry, &e->list);
}
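
/*
 * Note on the sentinel above: buf_mask + 1 can never result from
 * "wptr & buf_mask", so amdgpu_mux_resubmit_chunks() can test
 * "offset <= buf_mask" to see whether the IB submission recorded a
 * control/CE/DE offset for this chunk.  E.g. with buf_mask = 0xfff the
 * sentinel is 0x1000 while every real offset lies in [0, 0xfff].
 */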

static void scan_and_remove_signaled_chunk(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
	uint32_t last_seq = 0;
	struct amdgpu_mux_entry *e;
	struct amdgpu_mux_chunk *chunk, *tmp;

	e = amdgpu_ring_mux_sw_entry(mux, ring);
	if (!e) {
		DRM_ERROR("cannot find entry!\n");
		return;
	}

	last_seq = atomic_read(&ring->fence_drv.last_seq);

	list_for_each_entry_safe(chunk, tmp, &e->list, entry) {
		if (chunk->sync_seq <= last_seq) {
			list_del(&chunk->entry);
#ifdef __linux__
			kmem_cache_free(amdgpu_mux_chunk_slab, chunk);
#else
			pool_put(&amdgpu_mux_chunk_slab, chunk);
#endif
		}
	}
}
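
/*
 * Example (hypothetical sequence numbers): with last_seq = 12, chunks
 * whose sync_seq is 10, 11 or 12 have signaled and are freed back to the
 * slab/pool; a chunk with sync_seq = 13 stays on the list in case it must
 * be resubmitted after a preemption.
 */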

void amdgpu_ring_mux_ib_mark_offset(struct amdgpu_ring_mux *mux,
				    struct amdgpu_ring *ring, u64 offset,
				    enum amdgpu_ring_mux_offset_type type)
{
	struct amdgpu_mux_entry *e;
	struct amdgpu_mux_chunk *chunk;

	e = amdgpu_ring_mux_sw_entry(mux, ring);
	if (!e) {
		DRM_ERROR("cannot find entry!\n");
		return;
	}

	chunk = list_last_entry(&e->list, struct amdgpu_mux_chunk, entry);
	if (!chunk) {
		DRM_ERROR("cannot find chunk!\n");
		return;
	}

	switch (type) {
	case AMDGPU_MUX_OFFSET_TYPE_CONTROL:
		chunk->cntl_offset = offset;
		break;
	case AMDGPU_MUX_OFFSET_TYPE_DE:
		chunk->de_offset = offset;
		break;
	case AMDGPU_MUX_OFFSET_TYPE_CE:
		chunk->ce_offset = offset;
		break;
	default:
		DRM_ERROR("invalid type (%d)\n", type);
		break;
	}
}

void amdgpu_ring_mux_end_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
	struct amdgpu_mux_entry *e;
	struct amdgpu_mux_chunk *chunk;

	e = amdgpu_ring_mux_sw_entry(mux, ring);
	if (!e) {
		DRM_ERROR("cannot find entry!\n");
		return;
	}

	chunk = list_last_entry(&e->list, struct amdgpu_mux_chunk, entry);
	if (!chunk) {
		DRM_ERROR("cannot find chunk!\n");
		return;
	}

	chunk->end = ring->wptr;
	chunk->sync_seq = READ_ONCE(ring->fence_drv.sync_seq);

	scan_and_remove_signaled_chunk(mux, ring);
}

bool amdgpu_mcbp_handle_trailing_fence_irq(struct amdgpu_ring_mux *mux)
{
	struct amdgpu_mux_entry *e;
	struct amdgpu_ring *ring = NULL;
	int i;

	if (!mux->pending_trailing_fence_signaled)
		return false;

	if (mux->real_ring->trail_seq != le32_to_cpu(*mux->real_ring->trail_fence_cpu_addr))
		return false;

	for (i = 0; i < mux->num_ring_entries; i++) {
		e = &mux->ring_entry[i];
		if (e->ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT) {
			ring = e->ring;
			break;
		}
	}

	if (!ring) {
		DRM_ERROR("cannot find low priority ring\n");
		return false;
	}

	amdgpu_fence_process(ring);
	if (amdgpu_fence_count_emitted(ring) > 0) {
		mux->s_resubmit = true;
		mux->seqno_to_resubmit = ring->fence_drv.sync_seq;
		amdgpu_ring_mux_schedule_resubmit(mux);
	}

	mux->pending_trailing_fence_signaled = false;
	return true;
}