/* xref: /dflybsd-src/sys/dev/drm/amd/amdgpu/amdgpu_ctx.c (revision b843c749addef9340ee7d4e250b09fdd492602a1) */
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"

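/*
 * Check whether the calling client may create a context at the requested
 * scheduler priority. Priorities above NORMAL are restricted to tasks with
 * CAP_SYS_NICE or to the current DRM master.
 */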
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

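/*
 * Initialize a context: allocate the per-ring fence slots (amdgpu_sched_jobs
 * entries per ring), snapshot the reset/VRAM-lost counters and create one
 * scheduler entity per ring at the requested priority. The KIQ ring is
 * skipped since it does not take user submissions.
 */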
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
			      sizeof(struct dma_fence *), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	mutex_init(&ctx->lock);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ctx->rings[i].sequence = 1;
		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
	}

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

	/* create context entity for each ring */
	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		struct drm_sched_rq *rq;

		rq = &ring->sched.sched_rq[priority];

		if (ring == &adev->gfx.kiq.ring)
			continue;

		r = drm_sched_entity_init(&ctx->rings[i].entity,
					  &rq, 1, &ctx->guilty);
		if (r)
			goto failed;
	}

	r = amdgpu_queue_mgr_init(adev, &ctx->queue_mgr);
	if (r)
		goto failed;

	return 0;

failed:
	for (j = 0; j < i; j++)
		drm_sched_entity_destroy(&ctx->rings[j].entity);
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
}

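/*
 * Final kref release: drop every fence still referenced in the per-ring
 * slots, tear down the queue manager and free the context itself.
 */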
static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->rings[i].fences[j]);
	kfree(ctx->fences);
	ctx->fences = NULL;

	amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);

	mutex_destroy(&ctx->lock);

	kfree(ctx);
}

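/*
 * Allocate a context on behalf of an ioctl caller: reserve an IDR handle
 * under the manager lock, then initialize the context. On failure the
 * handle is released again and *id is reset to 0.
 */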
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

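/*
 * kref release callback for normal context destruction: destroy the
 * scheduler entities (except the KIQ ring's) before handing off to
 * amdgpu_ctx_fini().
 */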
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	u32 i;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	for (i = 0; i < ctx->adev->num_rings; i++) {

		if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
			continue;

		drm_sched_entity_destroy(&ctx->rings[i].entity);
	}

	amdgpu_ctx_fini(ref);
}

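/* Drop the IDR handle for a context and release the reference it held. */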
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

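/*
 * AMDGPU_CTX_OP_QUERY_STATE: report whether a GPU reset has occurred since
 * the previous query by comparing against the saved reset counter.
 */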
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

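/*
 * AMDGPU_CTX_OP_QUERY_STATE2: like amdgpu_ctx_query(), but reports reset,
 * VRAM-lost and guilty status as individual flags, and compares against the
 * counters captured at context creation rather than at the last query.
 */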
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
	struct amdgpu_fpriv *fpriv, uint32_t id,
	union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	mutex_unlock(&mgr->lock);
	return 0;
}

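/*
 * DRM_IOCTL_AMDGPU_CTX entry point: dispatch context alloc/free/query
 * operations for user space.
 */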
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (priority == DRM_SCHED_PRIORITY_INVALID)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

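/* Look up a context by handle and take a reference; paired with amdgpu_ctx_put(). */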
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

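/*
 * Store the fence of a new submission in the ring's next fence slot and
 * return its sequence number through @handler. The slot being recycled must
 * already contain a signaled fence; the submission path is expected to have
 * waited for it (see amdgpu_ctx_wait_prev_fence()), hence the BUG_ON.
 */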
int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct dma_fence *fence, uint64_t *handler)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = 0;
	struct dma_fence *other = NULL;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = cring->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handler)
		*handler = seq;

	return 0;
}

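/*
 * Look up the fence for sequence number @seq on @ring. A seq of ~0ull means
 * "the most recent submission". Returns ERR_PTR(-EINVAL) for sequence numbers
 * from the future, NULL if the fence slot has already been recycled (and the
 * fence therefore signaled), or a new reference to the stored fence.
 */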
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = ctx->rings[ring->idx].sequence - 1;

	if (seq >= cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

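/*
 * Apply a priority override to all of the context's scheduler entities.
 * DRM_SCHED_PRIORITY_UNSET restores the priority the context was created
 * with; the KIQ ring is left untouched.
 */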
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	int i;
	struct amdgpu_device *adev = ctx->adev;
	struct drm_sched_rq *rq;
	struct drm_sched_entity *entity;
	struct amdgpu_ring *ring;
	enum drm_sched_priority ctx_prio;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	for (i = 0; i < adev->num_rings; i++) {
		ring = adev->rings[i];
		entity = &ctx->rings[i].entity;
		rq = &ring->sched.sched_rq[ctx_prio];

		if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			continue;

		drm_sched_entity_set_rq(entity, rq);
	}
}

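/*
 * Before a fence slot is reused, wait (interruptibly) for the fence that is
 * about to be overwritten. Returns 0 on success or a negative error such as
 * -ERESTARTSYS if the wait was interrupted by a signal.
 */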
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
	unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
	struct dma_fence *other = cring->fences[idx];

	if (other) {
		signed long r;
		r = dma_fence_wait(other, true);
		if (r < 0) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("Error (%ld) waiting for fence!\n", r);

			return r;
		}
	}

	return 0;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

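/*
 * Flush the scheduler entities of every context owned by this manager,
 * waiting up to MAX_WAIT_SCHED_ENTITY_Q_EMPTY for their job queues to drain.
 */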
void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;
	long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {

		if (!ctx->adev) {
			mutex_unlock(&mgr->lock);
			return;
		}

		for (i = 0; i < ctx->adev->num_rings; i++) {

			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
				continue;

			max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
							  max_wait);
		}
	}
	mutex_unlock(&mgr->lock);
}

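/*
 * Tear down the scheduler entities of every remaining context. A context
 * that still holds extra references at this point is reported as a leak.
 */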
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {

		if (!ctx->adev)
			return;

		for (i = 0; i < ctx->adev->num_rings; i++) {

			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
				continue;

			if (kref_read(&ctx->refcount) == 1)
				drm_sched_entity_fini(&ctx->rings[i].entity);
			else
				DRM_ERROR("ctx %p is still alive\n", ctx);
		}
	}
}

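/*
 * Final teardown of the context manager: destroy the entities, drop the last
 * reference on every remaining context and release the IDR and lock.
 */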
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}