/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"

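/*
 * amdgpu_ctx_priority_permit - check whether the caller may use a priority
 *
 * Priorities above NORMAL are restricted: on this port the CAP_SYS_NICE
 * check is compiled out, so only the current DRM master may create
 * high-priority contexts. Returns 0 if permitted, -EACCES otherwise.
 */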
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

#if 0
	if (capable(CAP_SYS_NICE))
		return 0;
#endif

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

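/*
 * amdgpu_ctx_init - initialize a context and its per-ring scheduler entities
 *
 * Validates the requested priority, allocates the fence history array
 * (amdgpu_sched_jobs slots per ring) and creates one scheduler entity per
 * ring, skipping the KIQ ring which is reserved for kernel use.
 */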
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_init(&ctx->ring_lock, "agcrl");
	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
			      sizeof(struct dma_fence*), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	lockinit(&ctx->lock, "agctxl", 0, LK_CANRECURSE);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ctx->rings[i].sequence = 1;
		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
	}

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

	/* create context entity for each ring */
	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		struct drm_sched_rq *rq;

		rq = &ring->sched.sched_rq[priority];

		if (ring == &adev->gfx.kiq.ring)
			continue;

		r = drm_sched_entity_init(&ctx->rings[i].entity,
					  &rq, 1, &ctx->guilty);
		if (r)
			goto failed;
	}

	r = amdgpu_queue_mgr_init(adev, &ctx->queue_mgr);
	if (r)
		goto failed;

	return 0;

failed:
	for (j = 0; j < i; j++)
		drm_sched_entity_destroy(&ctx->rings[j].entity);
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
}

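/*
 * amdgpu_ctx_fini - final kref release callback
 *
 * Drops every fence still cached in the per-ring history, frees the fence
 * array, tears down the queue manager and the context lock, and frees the
 * context itself.
 */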
static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->rings[i].fences[j]);
	kfree(ctx->fences);
	ctx->fences = NULL;

	amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);

	mutex_destroy(&ctx->lock);

	kfree(ctx);
}

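/*
 * amdgpu_ctx_alloc - create a context and publish it in the per-file IDR
 *
 * The handle is allocated under mgr->lock before the context is
 * initialized; on init failure the handle is removed again so a
 * half-built context is never visible. The new handle is returned
 * through @id.
 */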
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), M_DRM, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

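/*
 * amdgpu_ctx_do_release - kref release callback for userspace handles
 *
 * Destroys the scheduler entity of every ring except KIQ, then hands the
 * final teardown to amdgpu_ctx_fini().
 */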
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	u32 i;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	for (i = 0; i < ctx->adev->num_rings; i++) {

		if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
			continue;

		drm_sched_entity_destroy(&ctx->rings[i].entity);
	}

	amdgpu_ctx_fini(ref);
}

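/*
 * amdgpu_ctx_free - drop the handle's reference on a context
 *
 * Removes the handle from the IDR and puts the reference it held.
 * Returns -EINVAL if the handle does not exist.
 */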
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

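/*
 * amdgpu_ctx_query - legacy AMDGPU_CTX_OP_QUERY_STATE implementation
 *
 * Reports whether a GPU reset happened since the last query by comparing
 * the device reset counter against the value cached in the context.
 */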
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

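/*
 * amdgpu_ctx_query2 - AMDGPU_CTX_OP_QUERY_STATE2 implementation
 *
 * Fills out->state.flags with the RESET, VRAMLOST and GUILTY bits based
 * on the device counters and the per-context guilty flag. Unlike
 * amdgpu_ctx_query(), this does not update the context's cached counter.
 */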
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	mutex_unlock(&mgr->lock);
	return 0;
}

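/*
 * amdgpu_ctx_ioctl - dispatch the DRM_AMDGPU_CTX ioctl
 *
 * Handles context allocation, free and both state queries. An invalid
 * priority from userspace is silently mapped to NORMAL for backwards
 * compatibility.
 */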
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (priority == DRM_SCHED_PRIORITY_INVALID)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

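/*
 * amdgpu_ctx_get - look up a context by handle and take a reference
 *
 * Returns NULL if the handle is unknown. The caller must balance this
 * with amdgpu_ctx_put().
 */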
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

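/*
 * amdgpu_ctx_put - release a reference taken with amdgpu_ctx_get()
 */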
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

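/*
 * amdgpu_ctx_add_fence - store a job's fence in the ring's fence history
 *
 * The history is a circular buffer of amdgpu_sched_jobs entries (indexed
 * with a power-of-two mask); the slot being reused must already hold a
 * signaled fence. The sequence number assigned to the new fence is
 * returned through @handler.
 */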
int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			 struct dma_fence *fence, uint64_t *handler)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = 0;
	struct dma_fence *other = NULL;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = cring->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handler)
		*handler = seq;

	return 0;
}

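/*
 * amdgpu_ctx_get_fence - look up a fence by sequence number
 *
 * A sequence of ~0ull means "the most recently submitted fence".
 * Returns ERR_PTR(-EINVAL) for sequences that have not been submitted
 * yet, NULL for fences that already aged out of the history buffer, and
 * a referenced fence otherwise.
 */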
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = ctx->rings[ring->idx].sequence - 1;

	if (seq >= cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

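/*
 * amdgpu_ctx_priority_override - move the context's entities to a new
 * priority run queue
 *
 * An override of DRM_SCHED_PRIORITY_UNSET restores the priority the
 * context was created with. The KIQ ring is skipped as usual.
 */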
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	int i;
	struct amdgpu_device *adev = ctx->adev;
	struct drm_sched_rq *rq;
	struct drm_sched_entity *entity;
	struct amdgpu_ring *ring;
	enum drm_sched_priority ctx_prio;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	for (i = 0; i < adev->num_rings; i++) {
		ring = adev->rings[i];
		entity = &ctx->rings[i].entity;
		rq = &ring->sched.sched_rq[ctx_prio];

		if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			continue;

		drm_sched_entity_set_rq(entity, rq);
	}
}

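/*
 * amdgpu_ctx_wait_prev_fence - wait for the fence occupying the next slot
 *
 * Called before a new submission reuses a slot in the fence history so
 * that amdgpu_ctx_add_fence() never overwrites an unsignaled fence.
 */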
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
	unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
	struct dma_fence *other = cring->fences[idx];

	if (other) {
		signed long r;
		r = dma_fence_wait(other, true);
		if (r < 0) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("Error (%ld) waiting for fence!\n", r);

			return r;
		}
	}

	return 0;
}

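/*
 * amdgpu_ctx_mgr_init - set up a per-file context manager
 */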
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	lockinit(&mgr->lock, "agml", 0, LK_CANRECURSE);
	idr_init(&mgr->ctx_handles);
}

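/*
 * amdgpu_ctx_mgr_entity_flush - flush all scheduler entities of a manager
 *
 * Gives the entities a total budget of MAX_WAIT_SCHED_ENTITY_Q_EMPTY to
 * drain their job queues; the remaining budget is carried from one
 * entity to the next.
 */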
void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;
	long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {

		if (!ctx->adev) {
			mutex_unlock(&mgr->lock);
			return;
		}

		for (i = 0; i < ctx->adev->num_rings; i++) {

			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
				continue;

			max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
							  max_wait);
		}
	}
	mutex_unlock(&mgr->lock);
}

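/*
 * amdgpu_ctx_mgr_entity_fini - finalize the entities of all remaining
 * contexts
 *
 * Only contexts whose sole remaining reference is the handle are torn
 * down; anything else is reported as still alive.
 */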
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {

		if (!ctx->adev)
			return;

		for (i = 0; i < ctx->adev->num_rings; i++) {

			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
				continue;

			if (kref_read(&ctx->refcount) == 1)
				drm_sched_entity_fini(&ctx->rings[i].entity);
			else
				DRM_ERROR("ctx %p is still alive\n", ctx);
		}
	}
}

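/*
 * amdgpu_ctx_mgr_fini - tear down a context manager on file close
 *
 * Drops the handle reference of every remaining context, then destroys
 * the IDR and the manager lock.
 */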
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}