/*	$NetBSD: amdgpu_ctx.c,v 1.5 2020/02/14 04:35:19 riastradh Exp $	*/

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_ctx.c,v 1.5 2020/02/14 04:35:19 riastradh Exp $");

#include <drm/drmP.h>
#include "amdgpu.h"

#include <linux/nbsd-namespace.h>

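/*
 * amdgpu_ctx_init: zero and initialize a context.  Takes the initial
 * reference, initializes the per-ring sequence counters, and, when the
 * GPU scheduler is enabled, creates a scheduler entity on each ring
 * using either the kernel or the normal run queue.  If entity creation
 * fails, the entities created so far are torn down, the context is
 * freed and the error is returned.
 */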
int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
		    struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		ctx->rings[i].sequence = 1;

	if (amdgpu_enable_scheduler) {
		/* create context entity for each ring */
		for (i = 0; i < adev->num_rings; i++) {
			struct amd_sched_rq *rq;
			if (kernel)
				rq = &adev->rings[i]->sched.kernel_rq;
			else
				rq = &adev->rings[i]->sched.sched_rq;
			r = amd_sched_entity_init(&adev->rings[i]->sched,
						  &ctx->rings[i].entity,
						  rq, amdgpu_sched_jobs);
			if (r)
				break;
		}

		if (i < adev->num_rings) {
			for (j = 0; j < i; j++)
				amd_sched_entity_fini(&adev->rings[j]->sched,
						      &ctx->rings[j].entity);
			kfree(ctx);
			return r;
		}
	}
	return 0;
}

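/*
 * amdgpu_ctx_fini: release the per-ring pending fences held by the
 * context and, when the GPU scheduler is enabled, destroy the scheduler
 * entities created by amdgpu_ctx_init.
 */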
void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
{
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
			fence_put(ctx->rings[i].fences[j]);

	if (amdgpu_enable_scheduler) {
		for (i = 0; i < adev->num_rings; i++)
			amd_sched_entity_fini(&adev->rings[i]->sched,
					      &ctx->rings[i].entity);
	}
}

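/*
 * amdgpu_ctx_alloc: allocate a context for a DRM file, publish it in the
 * per-file IDR under a new handle (returned through *id), and initialize
 * it as a user (non-kernel) context.
 */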
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	idr_preload(GFP_KERNEL);
	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		idr_preload_end();
		kfree(ctx);
		return r;
	}
	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, false, ctx);
	mutex_unlock(&mgr->lock);
	idr_preload_end();

	return r;
}

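/*
 * amdgpu_ctx_do_release: kref release callback; tears down the context
 * and frees its memory once the last reference has been dropped.
 */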
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	amdgpu_ctx_fini(ctx);

	kfree(ctx);
}

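/*
 * amdgpu_ctx_free: look up the handle in the per-file IDR, remove it and
 * drop the IDR's reference; returns -EINVAL if the handle is unknown.
 */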
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx) {
		idr_remove(&mgr->ctx_handles, id);
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
		mutex_unlock(&mgr->lock);
		return 0;
	}
	mutex_unlock(&mgr->lock);
	return -EINVAL;
}

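/*
 * amdgpu_ctx_query: fill in the ioctl state output for a context handle,
 * reporting whether a GPU reset has happened since the previous query.
 */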
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

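/*
 * amdgpu_ctx_ioctl: DRM_AMDGPU_CTX ioctl entry point; dispatches context
 * allocation, free and state-query requests for the calling file.
 */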
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

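/*
 * amdgpu_ctx_get: look up a context by handle and take a reference;
 * returns NULL if the file private data or the handle is invalid.
 */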
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

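/*
 * amdgpu_ctx_put: drop a reference taken with amdgpu_ctx_get, releasing
 * the context when it was the last one.
 */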
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

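/*
 * amdgpu_ctx_add_fence: record a new fence in the ring's fixed-size
 * pending window and return its sequence number.  If the slot about to
 * be reused still holds an older fence, wait for it first, so at most
 * AMDGPU_CTX_MAX_CS_PENDING submissions are outstanding per ring.
 */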
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct fence *fence)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = 0;
	struct fence *other = NULL;

	idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
	other = cring->fences[idx];
	if (other) {
		signed long r;
		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
		if (r < 0)
			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
	}

	fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	fence_put(other);

	return seq;
}

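/*
 * amdgpu_ctx_get_fence: return a reference to the fence for a sequence
 * number previously handed out by amdgpu_ctx_add_fence.  Sequence
 * numbers that have not been issued yet yield -EINVAL; ones older than
 * the pending window yield NULL, since their slots have already been
 * waited on and recycled.
 */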
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				   struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq >= cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

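/*
 * amdgpu_ctx_mgr_init: set up the per-file context manager, i.e. its
 * lock and the context handle IDR.
 */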
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

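/*
 * amdgpu_ctx_mgr_fini: drop the manager's reference on every context
 * still in the IDR, warning about contexts that remain referenced
 * elsewhere, then destroy the IDR and the lock.
 */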
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}
320