Lines Matching defs:fence

50 * for GPU/CPU synchronization. When the fence is written,
51 * it is expected that all buffers associated with that fence
59 * radeon_fence_write - write a fence value
63 * @ring: ring index the fence is associated with
65 * Writes a fence value to memory or a scratch register (all asics).
80 * radeon_fence_read - read a fence value
83 * @ring: ring index the fence is associated with
85 * Reads a fence value from memory or a scratch register (all asics).
86 * Returns the value of the fence read from memory or register.
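A minimal userspace model of this write/read pair, assuming the hardware-visible fence slot is a plain 32-bit location per ring (in the driver it is a CPU-addressable memory slot or a scratch register; all names below are hypothetical):

	#include <stdint.h>

	#define MODEL_NUM_RINGS 8                    /* stands in for RADEON_NUM_RINGS */

	static uint32_t fence_slot[MODEL_NUM_RINGS]; /* scratch reg / memory stand-in */

	static void model_fence_write(uint32_t seq, int ring)
	{
		fence_slot[ring] = seq;              /* driver: register write or *cpu_addr = seq */
	}

	static uint32_t model_fence_read(int ring)
	{
		return fence_slot[ring];
	}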
125 * radeon_fence_emit - emit a fence on the requested ring
128 * @fence: radeon fence object
129 * @ring: ring index the fence is associated with
131 * Emits a fence command on the requested ring (all asics).
135 struct radeon_fence **fence,
141 *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
142 if ((*fence) == NULL) {
145 (*fence)->rdev = rdev;
146 (*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
147 (*fence)->ring = ring;
148 (*fence)->is_vm_update = false;
149 dma_fence_init(&(*fence)->base, &radeon_fence_ops,
153 radeon_fence_ring_emit(rdev, ring, *fence);
154 trace_radeon_fence_emit(rdev_to_drm(rdev), ring, (*fence)->seq);
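A hypothetical caller of this emit path, pairing it with the wait and unref helpers that appear later in this listing (error handling follows the -ENOMEM path visible above):

	struct radeon_fence *fence = NULL;
	int r;

	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
	if (r)
		return r;                        /* allocation failed, nothing emitted */

	/* ... let the GPU chew on the submitted commands ... */

	r = radeon_fence_wait(fence, true);      /* interruptible wait */
	radeon_fence_unref(&fence);
	return r;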
163 * for the fence locking itself, so unlocked variants are used for
168 struct radeon_fence *fence;
171 fence = container_of(wait, struct radeon_fence, fence_wake);
177 seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
178 if (seq >= fence->seq) {
179 dma_fence_signal_locked(&fence->base);
180 radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
181 __remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
182 dma_fence_put(&fence->base);
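A userspace sketch of the unlocked test this callback performs: compare the ring's cached last_seq against the fence's own number and signal at most once (the atomic flag stands in for dma_fence_signal_locked plus the wait-queue removal; names are hypothetical):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	struct model_fence {
		uint64_t seq;
		atomic_bool signaled;
	};

	static bool model_check_signaled(struct model_fence *f,
					 _Atomic uint64_t *last_seq)
	{
		if (atomic_load(last_seq) < f->seq)
			return false;                /* not our turn yet */
		/* exchange, so concurrent wakers signal at most once */
		return !atomic_exchange(&f->signaled, true);
	}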
188 * radeon_fence_activity - check for fence activity
191 * @ring: ring index the fence is associated with
193 * Checks the current fence value and calculates the last
194 * signaled fence value. Returns true if activity occurred
210 * continuously new fences being signaled, i.e. radeon_fence_read needs
238 * checking if a fence is signaled as it means that the
245 * fact that we might have set an older fence
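The hardware hands back only a 32-bit sequence value, so the activity check has to widen it against the last known 64-bit sequence. A sketch of the wrap handling, assuming a single wrap per check (the driver instead borrows the upper bits of the last emitted sequence; names are hypothetical):

	#include <stdint.h>

	static uint64_t model_extend_seq(uint64_t last_seq, uint32_t hw_seq)
	{
		uint64_t seq = (last_seq & 0xffffffff00000000ULL) | hw_seq;

		if (seq < last_seq)                  /* low word wrapped since last check */
			seq += 0x100000000ULL;
		return seq;
	}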
264 * Checks for fence activity and if there is none probe
299 dev_warn(rdev->dev, "GPU lockup (current fence id "
300 "0x%016llx last fence id 0x%016llx on ring %d)\n",
312 * radeon_fence_process - process a fence
315 * @ring: ring index the fence is associated with
317 * Checks the current fence value and wakes the fence queue
327 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
331 * @ring: ring index the fence is associated with
333 * Check if the last signaled fence sequence number is >= the requested
335 * Returns true if the fence has signaled (current fence value
336 * is >= requested value) or false if it has not (current fence
356 struct radeon_fence *fence = to_radeon_fence(f);
357 struct radeon_device *rdev = fence->rdev;
358 unsigned ring = fence->ring;
359 u64 seq = fence->seq;
377 * radeon_fence_enable_signaling - enable signalling on fence
378 * @f: fence
381 * to fence_queue that checks if this fence is signaled, and if so it
382 * signals the fence and removes itself.
386 struct radeon_fence *fence = to_radeon_fence(f);
387 struct radeon_device *rdev = fence->rdev;
389 if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq)
393 radeon_irq_kms_sw_irq_get(rdev, fence->ring);
395 if (radeon_fence_activity(rdev, fence->ring))
398 /* did fence get signaled after we enabled the sw irq? */
399 if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
400 radeon_irq_kms_sw_irq_put(rdev, fence->ring);
408 if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring))
409 rdev->fence_drv[fence->ring].delayed_irq = true;
410 radeon_fence_schedule_check(rdev, fence->ring);
413 fence->fence_wake.flags = 0;
414 fence->fence_wake.private = NULL;
415 fence->fence_wake.func = radeon_fence_check_signaled;
416 __add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
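What matters in this path is the ordering: the software interrupt is enabled before last_seq is re-checked, so a fence that signals in the gap is caught rather than left with a waiter that never fires. A userspace sketch of that structure (the refcount and waiter flag are hypothetical stand-ins):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	struct model_waiter {
		uint64_t seq;
		bool queued;                          /* stands in for the wait-queue entry */
	};

	static int model_irq_refcount;

	static bool model_enable_signaling(struct model_waiter *w,
					   _Atomic uint64_t *last_seq)
	{
		if (atomic_load(last_seq) >= w->seq)
			return false;                 /* already signaled, no waiter needed */

		model_irq_refcount++;                 /* irq on *before* the re-check */

		if (atomic_load(last_seq) >= w->seq) {
			model_irq_refcount--;         /* signaled in the gap: undo and bail */
			return false;
		}

		w->queued = true;                     /* the irq path will signal us later */
		return true;
	}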
422 * radeon_fence_signaled - check if a fence has signaled
424 * @fence: radeon fence object
426 * Check if the requested fence has signaled (all asics).
427 * Returns true if the fence has signaled or false if it has not.
429 bool radeon_fence_signaled(struct radeon_fence *fence)
431 if (!fence)
434 if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
435 dma_fence_signal(&fence->base);
447 * Check if the last signaled fence sequence number is >= the requested
524 * radeon_fence_wait_timeout - wait for a fence to signal with timeout
526 * @fence: radeon fence object
529 * Wait for the requested fence to signal (all asics).
531 * (false) sleep when waiting for the fence.
536 long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout)
547 if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
548 return dma_fence_wait(&fence->base, intr);
550 seq[fence->ring] = fence->seq;
551 r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
556 dma_fence_signal(&fence->base);
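A hypothetical caller, mirroring the upstream return convention (remaining jiffies when the fence passed, 0 on timeout, negative on error):

	long r = radeon_fence_wait_timeout(fence, true, msecs_to_jiffies(2000));
	if (r == 0)
		return -ETIMEDOUT;                   /* never signaled within the budget */
	if (r < 0)
		return r;                            /* interrupted or device error */
	/* r > 0: signaled with that much of the timeout to spare */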
561 * radeon_fence_wait - wait for a fence to signal
563 * @fence: radeon fence object
566 * Wait for the requested fence to signal (all asics).
568 * (false) sleep when waiting for the fence.
569 * Returns 0 if the fence has passed, error for all other cases.
571 int radeon_fence_wait(struct radeon_fence *fence, bool intr)
573 long r = radeon_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
582 * radeon_fence_wait_any - wait for a fence to signal on any ring
585 * @fences: radeon fence object(s)
588 * Wait for any requested fence to signal (all asics). Fence
592 * Returns 0 if any fence has passed, error for all other cases.
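A hypothetical caller of the any-ring wait, assuming the fences array is indexed by ring and sparsely filled (gfx_fence and dma_fence are illustrative pending fences):

	struct radeon_fence *fences[RADEON_NUM_RINGS] = { };
	int r;

	fences[RADEON_RING_TYPE_GFX_INDEX] = gfx_fence;
	fences[R600_RING_TYPE_DMA_INDEX] = dma_fence;

	r = radeon_fence_wait_any(rdev, fences, true); /* returns once either signals */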
625 * radeon_fence_wait_next - wait for the next fence to signal
628 * @ring: ring index the fence is associated with
630 * Wait for the next fence on the requested ring to signal (all asics).
631 * Returns 0 if the next fence has passed, error for all other cases.
642 already the last emitted fence */
655 * @ring: ring index the fence is associated with
682 * radeon_fence_ref - take a ref on a fence
684 * @fence: radeon fence object
686 * Take a reference on a fence (all asics).
687 * Returns the fence.
689 struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
691 dma_fence_get(&fence->base);
692 return fence;
696 * radeon_fence_unref - remove a ref on a fence
698 * @fence: radeon fence object
700 * Remove a reference on a fence (all asics).
702 void radeon_fence_unref(struct radeon_fence **fence)
704 struct radeon_fence *tmp = *fence;
706 *fence = NULL;
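A hypothetical sketch of the ref/unref pattern these two helpers support: take a reference before publishing the fence to another context, and drop it with the pointer-NULLing unref when done:

	struct radeon_fence *keep = radeon_fence_ref(fence);

	/* ... hand 'keep' to a worker and use it there ... */

	radeon_fence_unref(&keep);               /* drops the ref and sets keep = NULL */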
716 * @ring: ring index the fence is associated with
727 * but it's OK to report a slightly wrong fence count here.
742 * @fence: radeon fence object
745 * Check if the fence needs to be synced against another ring
750 bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
754 if (!fence) {
758 if (fence->ring == dst_ring) {
763 fdrv = &fence->rdev->fence_drv[dst_ring];
764 if (fence->seq <= fdrv->sync_seq[fence->ring]) {
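A userspace model of the decision above: a fence needs a semaphore sync only when it lives on a different ring and its sequence number is newer than what the destination ring last synced to (the array models fdrv->sync_seq; names are hypothetical):

	#include <stdbool.h>
	#include <stdint.h>

	static bool model_need_sync(uint64_t fence_seq, int fence_ring,
				    const uint64_t *dst_synced, int dst_ring)
	{
		if (fence_ring == dst_ring)
			return false;                /* a ring orders itself */
		return fence_seq > dst_synced[fence_ring];
	}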
774 * @fence: radeon fence object
777 * Note the sequence number at which point the fence will
780 void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
785 if (!fence) {
789 if (fence->ring == dst_ring) {
794 src = &fence->rdev->fence_drv[fence->ring];
795 dst = &fence->rdev->fence_drv[dst_ring];
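A userspace model of what note_sync records: after the semaphore wait, the destination ring is as up to date as the source ring, so its per-ring sync marks are raised to the source's (skipping the destination itself; names are hypothetical):

	#include <stdint.h>

	#define MODEL_NUM_RINGS 8                    /* stands in for RADEON_NUM_RINGS */

	static void model_note_sync(const uint64_t src_sync[MODEL_NUM_RINGS],
				    uint64_t dst_sync[MODEL_NUM_RINGS], int dst_ring)
	{
		int i;

		for (i = 0; i < MODEL_NUM_RINGS; ++i) {
			if (i == dst_ring)
				continue;            /* a ring never syncs to itself */
			if (src_sync[i] > dst_sync[i])
				dst_sync[i] = src_sync[i];
		}
	}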
805 * radeon_fence_driver_start_ring - make the fence driver
809 * @ring: ring index to start the fence driver on
811 * Make the fence driver ready for processing (all asics).
813 * start the fence driver on the rings it has.
831 /* put fence directly behind firmware */
840 dev_err(rdev->dev, "fence failed to get scratch register\n");
851 dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx\n",
857 * radeon_fence_driver_init_ring - init the fence driver
861 * @ring: ring index to start the fence driver on
863 * Init the fence driver for the requested ring (all asics).
883 * radeon_fence_driver_init - init the fence driver
888 * Init the fence driver for all possible rings (all asics).
890 * start the fence driver on the rings it has using
906 * radeon_fence_driver_fini - tear down the fence driver
911 * Tear down the fence driver for all possible rings (all asics).
935 * radeon_fence_driver_force_completion - force all fence waiters to complete
940 * In case of GPU reset failure make sure no process keeps waiting on a fence
968 seq_printf(m, "Last signaled fence 0x%016llx\n",
985 * Manually trigger a GPU reset at the next fence wait.
1018 static const char *radeon_fence_get_driver_name(struct dma_fence *fence)
1025 struct radeon_fence *fence = to_radeon_fence(f);
1026 switch (fence->ring) {
1039 static inline bool radeon_test_signaled(struct radeon_fence *fence)
1041 return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
1050 radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1061 struct radeon_fence *fence = to_radeon_fence(f);
1062 struct radeon_device *rdev = fence->rdev;
1080 if (radeon_test_signaled(fence))
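For orientation, these callbacks hang off the dma_fence_ops table that the emit path passes to dma_fence_init above. A sketch of that table based on the callback names visible in this listing (the exact field set is an assumption about the upstream definition):

	static const struct dma_fence_ops radeon_fence_ops = {
		.get_driver_name = radeon_fence_get_driver_name,
		.get_timeline_name = radeon_fence_get_timeline_name,
		.enable_signaling = radeon_fence_enable_signaling,
		.signaled = radeon_fence_is_signaled,
		.wait = radeon_fence_default_wait,
		.release = radeon_fence_release,
	};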