/*	$NetBSD: amdgpu_ctx.c,v 1.4 2018/08/27 14:14:28 riastradh Exp $	*/

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_ctx.c,v 1.4 2018/08/27 14:14:28 riastradh Exp $");

#include <drm/drmP.h>
#include "amdgpu.h"

int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
		    struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		ctx->rings[i].sequence = 1;

	if (amdgpu_enable_scheduler) {
		/* create context entity for each ring */
		for (i = 0; i < adev->num_rings; i++) {
			struct amd_sched_rq *rq;
			if (kernel)
				rq = &adev->rings[i]->sched.kernel_rq;
			else
				rq = &adev->rings[i]->sched.sched_rq;
			r = amd_sched_entity_init(&adev->rings[i]->sched,
						  &ctx->rings[i].entity,
						  rq, amdgpu_sched_jobs);
			if (r)
				break;
		}

		if (i < adev->num_rings) {
			for (j = 0; j < i; j++)
				amd_sched_entity_fini(&adev->rings[j]->sched,
						      &ctx->rings[j].entity);
			kfree(ctx);
			return r;
		}
	}
	return 0;
}

void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
{
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
			fence_put(ctx->rings[i].fences[j]);

	if (amdgpu_enable_scheduler) {
		for (i = 0; i < adev->num_rings; i++)
			amd_sched_entity_fini(&adev->rings[i]->sched,
					      &ctx->rings[i].entity);
	}
}

static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	idr_preload(GFP_KERNEL);
	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		idr_preload_end();
		kfree(ctx);
		return r;
	}
	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, false, ctx);
	mutex_unlock(&mgr->lock);
	idr_preload_end();

	return r;
}
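/*
 * Context lifetime, as implemented below: amdgpu_ctx_alloc() creates a
 * context with a reference count of one and publishes it in the per-file
 * idr under mgr->lock; amdgpu_ctx_get() takes an additional reference for
 * each lookup.  The final kref_put() -- from amdgpu_ctx_free(),
 * amdgpu_ctx_put() or amdgpu_ctx_mgr_fini() -- lands in
 * amdgpu_ctx_do_release(), which tears down the scheduler entities and
 * pending fences and then frees the context.
 */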
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	amdgpu_ctx_fini(ctx);

	kfree(ctx);
}

static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx) {
		idr_remove(&mgr->ctx_handles, id);
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
		mutex_unlock(&mgr->lock);
		return 0;
	}
	mutex_unlock(&mgr->lock);
	return -EINVAL;
}

static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}
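/*
 * Each context keeps a small ring buffer of the last
 * AMDGPU_CTX_MAX_CS_PENDING fences submitted per hardware ring, indexed by
 * sequence number modulo the buffer size.  Before a slot is reused, the
 * fence previously stored there is waited on, which bounds how many command
 * submissions a context can have in flight on any one ring.  Sequence
 * numbers older than that window are reported as already signaled by
 * amdgpu_ctx_get_fence() (it returns NULL).
 */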
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct fence *fence)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = 0;
	struct fence *other = NULL;

	idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
	other = cring->fences[idx];
	if (other) {
		signed long r;
		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
		if (r < 0)
			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
	}

	fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	fence_put(other);

	return seq;
}

struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				   struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq >= cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
#ifdef __NetBSD__
	linux_mutex_init(&mgr->lock);
#else
	mutex_init(&mgr->lock);
#endif
	idr_init(&mgr->ctx_handles);
}

void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
#ifdef __NetBSD__
	linux_mutex_destroy(&mgr->lock);
#else
	mutex_destroy(&mgr->lock);
#endif
}
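#if 0	/* Illustrative only; not compiled. */
/*
 * Minimal sketch of how a caller (for example the command-submission wait
 * path) might combine amdgpu_ctx_get(), amdgpu_ctx_get_fence() and
 * amdgpu_ctx_put() defined above.  The function name and the timeout
 * parameter are hypothetical and exist only for this example.
 */
static long
example_wait_for_submission(struct amdgpu_fpriv *fpriv, uint32_t ctx_id,
			    struct amdgpu_ring *ring, uint64_t seq,
			    signed long timeout)
{
	struct amdgpu_ctx *ctx;
	struct fence *fence;
	long r;

	/* Take an extra reference on the context for the duration. */
	ctx = amdgpu_ctx_get(fpriv, ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	/* Look up the fence recorded for this ring/sequence pair. */
	fence = amdgpu_ctx_get_fence(ctx, ring, seq);
	if (IS_ERR(fence)) {
		/* The sequence number has not been emitted yet. */
		r = PTR_ERR(fence);
	} else if (fence != NULL) {
		/* 0 on timeout, remaining jiffies on success, <0 on error. */
		r = fence_wait_timeout(fence, true, timeout);
		fence_put(fence);
	} else {
		/* Fence already aged out of the per-ring window: signaled. */
		r = 1;
	}

	amdgpu_ctx_put(ctx);
	return r;
}
#endif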