/*	$OpenBSD: radeon_fence.c,v 1.10 2015/09/27 11:09:26 jsg Exp $	*/
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <dev/pci/drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed. Whether
 * we use a scratch register or memory location depends on the asic
 * and whether writeback is enabled.
 */

/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		*drv->cpu_addr = cpu_to_le32(seq);
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		seq = le32_to_cpu(*drv->cpu_addr);
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}
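
/*
 * Illustrative sketch only (not part of the driver): a typical caller of
 * the fence API implemented in this file emits a fence after submitting
 * work to a ring, waits for it, and then drops its reference.  The
 * variables are hypothetical, rdev is assumed to be an initialized
 * radeon_device, and the caller is assumed to hold whatever ring locking
 * radeon_fence_emit() requires (per the comment in that function it is
 * normally called with the ring emission mutex held):
 *
 *	struct radeon_fence *fence;
 *	int r;
 *
 *	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
 *	if (r)
 *		return r;
 *	r = radeon_fence_wait(fence, false);
 *	radeon_fence_unref(&fence);
 */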

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	radeon_fence_ring_emit(rdev, ring, *fence);
	trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
	return 0;
}

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and xchg of the current process.
	 *
	 * Moreover, for this to turn into an infinite loop there need to be
	 * continuously new fences signaled, i.e. radeon_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that updates last_seq
	 * between the atomic read and xchg of the current process. And the
	 * value the other process sets as last_seq must be higher than
	 * the seq value we just read, which means the current process
	 * needs to be interrupted after radeon_fence_read and before the
	 * atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * bail out after 10 loops, accepting the fact that we might
	 * have temporarily set last_seq not to the true last seq but to
	 * an older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped too many times; leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (wake) {
		rdev->fence_drv[ring].last_activity = jiffies;
		wake_up_all(&rdev->fence_queue);
	}
}
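
/*
 * Worked example (added for clarity, values made up): suppose the 64-bit
 * last_seq is 0x00000001fffffff0, last_emitted is 0x0000000200000008, and
 * the 32-bit value read from the hardware has wrapped around to
 * 0x00000005.  Then in radeon_fence_process above:
 *
 *	seq  = 0x00000005
 *	seq |= last_seq & 0xffffffff00000000      ->  0x0000000100000005
 *	seq < last_seq, so:
 *	seq &= 0xffffffff                         ->  0x0000000000000005
 *	seq |= last_emitted & 0xffffffff00000000  ->  0x0000000200000005
 *
 * which is the correct 64-bit sequence number: greater than last_seq and
 * not past last_emitted, so the loop accepts it and stores it.
 */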

/**
 * radeon_fence_destroy - destroy a fence
 *
 * @kref: fence kref
 *
 * Frees the fence object (all asics).
 */
static void radeon_fence_destroy(struct kref *kref)
{
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	kfree(fence);
}

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence) {
		return true;
	}
	if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
		return true;
	}
	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		fence->seq = RADEON_FENCE_SIGNALED_SEQ;
		return true;
	}
	return false;
}

/**
 * radeon_fence_wait_seq - wait for a specific sequence number
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number we want to wait for
 * @ring: ring index the fence is associated with
 * @intr: use interruptible sleep
 * @lock_ring: whether the ring should be locked or not
 *
 * Wait for the requested sequence number to be written (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected and the ring is
 * marked as not ready so no further jobs get scheduled until a successful
 * reset.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
				 unsigned ring, bool intr, bool lock_ring)
{
	unsigned long timeout, last_activity;
	uint64_t seq;
	unsigned i;
	bool signaled;
	int r, error;

	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
		if (!rdev->ring[ring].ready) {
			return -EBUSY;
		}

		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = rdev->fence_drv[ring].last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms
			 * anyway we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}
		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
		/* Save current last activity value, used to check for GPU lockups */
		last_activity = rdev->fence_drv[ring].last_activity;

		trace_radeon_fence_wait_begin(rdev->ddev, seq);
		radeon_irq_kms_sw_irq_get(rdev, ring);
		r = timeout;
		while (r > 0) {
			signaled = radeon_fence_seq_signaled(rdev, target_seq, ring);
			if (signaled)
				break;
			error = tsleep(&rdev->fence_queue,
			    PZERO | (intr ? PCATCH : 0), "rfnwt", timeout);
			if (error == ERESTART)
				error = EINTR; /* XXX */
			if (error == EWOULDBLOCK)
				error = 0;
			r = -error;
		}
		radeon_irq_kms_sw_irq_put(rdev, ring);
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, seq);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			/* check if sequence value has changed since last_activity */
			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
				continue;
			}

			if (lock_ring) {
				mutex_lock(&rdev->ring_lock);
			}

			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != rdev->fence_drv[ring].last_activity) {
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
					 target_seq, seq);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				return -EDEADLK;
			}

			if (lock_ring) {
				mutex_unlock(&rdev->ring_lock);
			}
		}
	}
	return 0;
}

/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return -EINVAL;
	}

	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
				  fence->ring, intr, true);
	if (r) {
		return r;
	}
	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
	return 0;
}

static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
			return true;
		}
	}
	return false;
}

/**
 * radeon_fence_wait_any_seq - wait for a sequence number on any ring
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). Sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait_any(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 */
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
				     u64 *target_seq, bool intr)
{
	unsigned long timeout, last_activity, tmp;
	unsigned i, ring = RADEON_NUM_RINGS;
	bool signaled;
	int r, error;

	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i]) {
			continue;
		}

		/* use the most recent one as indicator */
		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
			last_activity = rdev->fence_drv[i].last_activity;
		}

		/* For lockup detection just pick the lowest ring we are
		 * actively waiting for
		 */
		if (i < ring) {
			ring = i;
		}
	}

	/* nothing to wait for ? */
	if (ring == RADEON_NUM_RINGS) {
		return -ENOENT;
	}

	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms
			 * anyway we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}

		trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_get(rdev, i);
			}
		}
		r = timeout;
		while (r > 0) {
			signaled = radeon_fence_any_seq_signaled(rdev, target_seq);
			if (signaled)
				break;
			error = tsleep(&rdev->fence_queue,
			    PZERO | (intr ? PCATCH : 0), "rfwa", timeout);
			if (error == ERESTART)
				error = EINTR; /* XXX */
			if (error == EWOULDBLOCK)
				error = 0;
			r = -error;
		}
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_put(rdev, i);
			}
		}
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			mutex_lock(&rdev->ring_lock);
			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
					tmp = rdev->fence_drv[i].last_activity;
				}
			}
			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != tmp) {
				last_activity = tmp;
				mutex_unlock(&rdev->ring_lock);
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
					 target_seq[ring]);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				mutex_unlock(&rdev->ring_lock);
				return -EDEADLK;
			}
			mutex_unlock(&rdev->ring_lock);
		}
	}
	return 0;
}

/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics). Fence
 * array is indexed by ring id. @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i]) {
			continue;
		}

		if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
			/* something was already signaled */
			return 0;
		}

		seq[i] = fences[i]->seq;
	}

	r = radeon_fence_wait_any_seq(rdev, seq, intr);
	if (r) {
		return r;
	}
	return 0;
}
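
/*
 * Illustrative sketch only (not part of the driver): a caller holding
 * fences from several rings can wait for whichever signals first by
 * building a per-ring array.  gfx_fence and dma_fence are hypothetical
 * variables; the ring indices are assumed to be the ones defined in
 * radeon.h:
 *
 *	struct radeon_fence *fences[RADEON_NUM_RINGS] = { NULL };
 *	int r;
 *
 *	fences[RADEON_RING_TYPE_GFX_INDEX] = gfx_fence;
 *	fences[R600_RING_TYPE_DMA_INDEX] = dma_fence;
 *	r = radeon_fence_wait_any(rdev, fences, false);
 */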

/**
 * radeon_fence_wait_next_locked - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq;

	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is
		   already the last emitted fence */
		return -ENOENT;
	}
	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}

/**
 * radeon_fence_wait_empty_locked - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
	int r;

	r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
	if (r) {
		if (r == -EDEADLK) {
			return -EDEADLK;
		}
		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
			ring, r);
	}
	return 0;
}

/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}

/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid 32-bit wrap around */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}

/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence) {
		return false;
	}

	if (fence->ring == dst_ring) {
		return false;
	}

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
		return false;
	}

	return true;
}

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned i;

	if (!fence) {
		return;
	}

	if (fence->ring == dst_ring) {
		return;
	}

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring) {
			continue;
		}
		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
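
/*
 * Illustrative sketch only (not part of the driver): a caller that makes
 * work on dst_ring depend on a fence emitted on another ring would
 * typically combine the two helpers above.  The semaphore step is only a
 * placeholder for whatever mechanism (e.g. the radeon_semaphore helpers)
 * the caller actually uses:
 *
 *	if (radeon_fence_need_sync(fence, dst_ring)) {
 *		... emit a semaphore wait on dst_ring for fence->ring ...
 *		radeon_fence_note_sync(fence, dst_ring);
 *	}
 */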

/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		index = R600_WB_EVENT_OFFSET + ring * 4;
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
	}
	rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
	rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
#ifdef DRMDEBUG
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
#endif
	return 0;
}

/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].last_activity = jiffies;
	rdev->fence_drv[ring].initialized = false;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		r = radeon_fence_wait_empty_locked(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev);
		}
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}
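
/*
 * Illustrative sketch only (not part of the driver): the expected
 * ordering of the init/teardown helpers above, as seen from the device
 * setup code, is roughly:
 *
 *	radeon_fence_driver_init(rdev);              device init
 *	...
 *	radeon_fence_driver_start_ring(rdev, ring);  once each ring is up
 *	...
 *	radeon_fence_driver_fini(rdev);              device teardown
 */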

/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a fence
 * that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev)
{
	int ring;

	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
	}
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}