Lines Matching defs:fence

45 return IS_ERR(rcu_access_pointer(active->fence));
138 GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
210 return (struct dma_fence ** __force)&active->fence;
214 active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
219 return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
223 node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
225 if (active_fence_cb(fence, cb))
230 excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
232 if (active_fence_cb(fence, cb))
442 struct dma_fence *fence = &rq->fence;
459 RCU_INIT_POINTER(active->fence, NULL);
464 fence = __i915_active_fence_set(active, fence);
465 if (!fence)
468 dma_fence_put(fence);
478 struct dma_fence *fence)
483 RCU_INIT_POINTER(active->fence, fence);
487 prev = __i915_active_fence_set(active, fence);
568 struct dma_fence *fence;
573 fence = i915_active_fence_get(active);
574 if (!fence)
577 dma_fence_enable_sw_signaling(fence);
578 dma_fence_put(fence);
617 /* Any fence added after the wait begins will not be auto-signaled */
640 int (*fn)(void *arg, struct dma_fence *fence),
643 struct dma_fence *fence;
648 fence = i915_active_fence_get(active);
649 if (fence) {
652 err = fn(arg, fence);
653 dma_fence_put(fence);
680 static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence)
689 if (!i915_sw_fence_await(fence)) {
696 wb->base.private = fence;
705 int (*fn)(void *arg, struct dma_fence *fence),
714 rcu_access_pointer(ref->excl.fence)) {
745 static int rq_await_fence(void *arg, struct dma_fence *fence)
747 return i915_request_await_dma_fence(arg, fence);
757 static int sw_await_fence(void *arg, struct dma_fence *fence)
759 return i915_sw_fence_await_dma_fence(arg, fence, 0,
763 int i915_sw_fence_await_active(struct i915_sw_fence *fence,
767 return await_active(ref, flags, sw_await_fence, fence, fence);
910 RCU_INIT_POINTER(node->base.fence, NULL);
926 RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
930 GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));
1032 smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
1033 list_add_tail((struct list_head *)node, &rq->fence.cb_list);
1039 * __i915_active_fence_set: Update the last active fence along its timeline
1041 * @fence: the new fence (under construction)
1043 * Records the new @fence as the last active fence along its timeline in
1045 * fence onto this one. Gets and returns a reference to the previous fence
1047 * that it is executed before the new fence. To ensure that the order of
1053 struct dma_fence *fence)
1062 * to a new, completely unrelated fence that reuses the same memory
1068 * As a countermeasure, we try to get a reference to the active->fence
1074 if (fence == prev)
1075 return fence;
1077 GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
1081 * C already resident as the active->fence.
1088 * nesting rules for the fence->lock; the inner lock is always the
1091 spin_lock_irqsave(fence->lock, flags);
1100 * active->fence. Meanwhile, B follows the same path as A.
1102 * active->fence, locks it as soon as A completes, and possibly
1105 while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) {
1110 spin_unlock_irqrestore(fence->lock, flags);
1113 GEM_BUG_ON(prev == fence);
1115 spin_lock_irqsave(fence->lock, flags);
1121 * If prev is NULL then the previous fence must have been signaled
1123 * present then, having the lock on that fence already acquired, we
1128 * As B is second, it sees A as the previous fence and so waits for
1136 list_add_tail(&active->cb.node, &fence->cb_list);
1137 spin_unlock_irqrestore(fence->lock, flags);
1145 struct dma_fence *fence;
1149 fence = __i915_active_fence_set(active, &rq->fence);
1150 if (fence) {
1151 err = i915_request_await_dma_fence(rq, fence);
1152 dma_fence_put(fence);
1158 void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
1160 active_fence_cb(fence, cb);
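
The hits above appear to come from the i915 "active" fence-tracking code (assumed to be drivers/gpu/drm/i915/i915_active.c; the listing itself does not name the file). Two patterns recur in the matches: active_fence_cb() at line 219 retires the tracked slot with cmpxchg(slot, fence, NULL) == fence, so a fence that signals late never clears a newer fence that has since been installed, and __i915_active_fence_set() (documented around lines 1039-1052) installs a new fence and hands back the previous one so callers such as the code at line 1149 can await it first. The userspace sketch below illustrates only that slot-replacement idea; fake_fence, active_slot, set_active_fence() and retire_active_fence() are made-up names, C11 atomics stand in for the kernel's cmpxchg/RCU helpers, and the fence->lock ordering loop visible at line 1105 is deliberately omitted.

/*
 * Sketch only: an analogy to the slot-replacement pattern in the listing,
 * not the kernel implementation.  A slot holds the last active fence; the
 * completion callback clears it only if it still points at the fence that
 * just signalled, and replacing the fence returns the previous one so the
 * caller can order against it.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_fence {
	const char *name;
};

struct active_slot {
	_Atomic(struct fake_fence *) fence;	/* last active fence, or NULL */
};

/* Install a new fence; returns the previously tracked fence (may be NULL). */
static struct fake_fence *
set_active_fence(struct active_slot *slot, struct fake_fence *fence)
{
	return atomic_exchange(&slot->fence, fence);
}

/*
 * Completion callback: clear the slot only if it still refers to the fence
 * that completed.  If a newer fence has already been installed, leave it
 * alone.  Returns true when this call was the one to retire the slot.
 */
static bool
retire_active_fence(struct active_slot *slot, struct fake_fence *fence)
{
	struct fake_fence *expected = fence;

	return atomic_compare_exchange_strong(&slot->fence, &expected, NULL);
}

int main(void)
{
	struct active_slot slot = { .fence = NULL };
	struct fake_fence a = { "fence A" }, b = { "fence B" };
	struct fake_fence *prev;

	prev = set_active_fence(&slot, &a);	/* no previous fence */
	printf("prev after A: %s\n", prev ? prev->name : "(none)");

	prev = set_active_fence(&slot, &b);	/* caller would wait on A */
	printf("prev after B: %s\n", prev ? prev->name : "(none)");

	/* A signals late: the slot now holds B, so nothing is cleared. */
	printf("retire A: %s\n", retire_active_fence(&slot, &a) ? "cleared" : "skipped");
	/* B signals: it is still the tracked fence, so the slot is cleared. */
	printf("retire B: %s\n", retire_active_fence(&slot, &b) ? "cleared" : "skipped");

	return 0;
}

Running the sketch shows that retiring A after B has been installed leaves the slot untouched, while retiring B clears it, which is the property the compare-and-exchange at line 219 enforces.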