/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#ifdef notyet
#include <drm/drm_auth.h>
#endif
#include "amdgpu.h"
#include "amdgpu_sched.h"

static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

#ifdef notyet
	if (capable(CAP_SYS_NICE))
		return 0;
#endif

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	mtx_init(&ctx->ring_lock, IPL_TTY);
	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
			      sizeof(struct dma_fence*), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	rw_init(&ctx->lock, "amctxlk");

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ctx->rings[i].sequence = 1;
		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
	}

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

	/* create context entity for each ring */
	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		struct drm_sched_rq *rq;

		rq = &ring->sched.sched_rq[priority];

		if (ring == &adev->gfx.kiq.ring)
			continue;

		r = drm_sched_entity_init(&ctx->rings[i].entity,
					  &rq, 1, &ctx->guilty);
		if (r)
			goto failed;
	}

	r = amdgpu_queue_mgr_init(adev, &ctx->queue_mgr);
	if (r)
		goto failed;

	return 0;

failed:
	for (j = 0; j < i; j++)
		drm_sched_entity_destroy(&ctx->rings[j].entity);
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
}

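/*
 * Release callback for the final context reference: drop every fence still
 * held in the per-ring fence arrays, free the fence storage and the queue
 * manager, and free the context itself.  The scheduler entities are torn
 * down separately before this runs (see amdgpu_ctx_do_release() and
 * amdgpu_ctx_mgr_entity_fini()).
 */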
static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->rings[i].fences[j]);
	kfree(ctx->fences);
	ctx->fences = NULL;

	amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);

	mutex_destroy(&ctx->lock);

	kfree(ctx);
}

static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	u32 i;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	for (i = 0; i < ctx->adev->num_rings; i++) {

		if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
			continue;

		drm_sched_entity_destroy(&ctx->rings[i].entity);
	}

	amdgpu_ctx_fini(ref);
}

static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

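/*
 * AMDGPU_CTX_OP_QUERY_STATE: report whether a GPU reset has occurred since
 * the previous query by comparing the context's snapshot against the
 * device-wide gpu_reset_counter.  amdgpu_ctx_query2() below additionally
 * reports VRAM loss and whether this context was marked guilty of a hang.
 */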
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	mutex_unlock(&mgr->lock);
	return 0;
}

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (priority == DRM_SCHED_PRIORITY_INVALID)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			 struct dma_fence *fence, uint64_t* handler)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = 0;
	struct dma_fence *other = NULL;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = cring->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handler)
		*handler = seq;

	return 0;
}

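/*
 * Fences are tracked in a per-ring circular buffer of amdgpu_sched_jobs
 * entries, indexed by the low bits of the submission sequence number.
 * Looking up a sequence number that has not been emitted yet is an error,
 * while one that has already been overwritten (more than amdgpu_sched_jobs
 * submissions old) is returned as NULL, meaning it signaled long ago.
 */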
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = ctx->rings[ring->idx].sequence - 1;

	if (seq >= cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	int i;
	struct amdgpu_device *adev = ctx->adev;
	struct drm_sched_rq *rq;
	struct drm_sched_entity *entity;
	struct amdgpu_ring *ring;
	enum drm_sched_priority ctx_prio;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	for (i = 0; i < adev->num_rings; i++) {
		ring = adev->rings[i];
		entity = &ctx->rings[i].entity;
		rq = &ring->sched.sched_rq[ctx_prio];

		if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			continue;

		drm_sched_entity_set_rq(entity, rq);
	}
}

int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
	unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
	struct dma_fence *other = cring->fences[idx];

	if (other) {
		signed long r;
		r = dma_fence_wait(other, true);
		if (r < 0) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("Error (%ld) waiting for fence!\n", r);

			return r;
		}
	}

	return 0;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	rw_init(&mgr->lock, "mgrlk");
	idr_init(&mgr->ctx_handles);
}

void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;
	long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {

		if (!ctx->adev) {
			mutex_unlock(&mgr->lock);
			return;
		}

		for (i = 0; i < ctx->adev->num_rings; i++) {

			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
				continue;

			max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
							  max_wait);
		}
	}
	mutex_unlock(&mgr->lock);
}

void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {

		if (!ctx->adev)
			return;

		for (i = 0; i < ctx->adev->num_rings; i++) {

			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
				continue;

			if (kref_read(&ctx->refcount) == 1)
				drm_sched_entity_fini(&ctx->rings[i].entity);
			else
				DRM_ERROR("ctx %p is still alive\n", ctx);
		}
	}
}

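/*
 * Final teardown of a file's context manager: finish any remaining
 * scheduler entities, drop the last reference on every context still in
 * the handle IDR, then destroy the IDR and the manager lock.
 */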
alive\n", ctx); 517 } 518 519 idr_destroy(&mgr->ctx_handles); 520 mutex_destroy(&mgr->lock); 521 } 522