Lines Matching defs:fence

3  * Parts ported from amdgpu (fence wait code).
41 * - Import and export a syncobj's underlying fence to/from a sync file
42 * - Reset a syncobj (set its fence to NULL)
43 * - Signal a syncobj (set a trivially signaled fence)
44 * - Wait for a syncobj's fence to appear and be signaled
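A minimal userspace sketch of these host-side operations, assuming an already-open DRM device fd and the libdrm wrappers drmSyncobjCreate/Signal/Reset/Wait/ExportSyncFile/ImportSyncFile; error handling is omitted and the flow is illustrative only:

#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>

static void syncobj_host_ops_demo(int fd)
{
	uint32_t handle;
	int sync_fd;

	drmSyncobjCreate(fd, 0, &handle);   /* fence starts out NULL */
	drmSyncobjSignal(fd, &handle, 1);   /* attach an already signaled fence */

	/* Returns immediately because the fence is already signaled. */
	drmSyncobjWait(fd, &handle, 1, INT64_MAX, 0, NULL);

	/* Round-trip the current fence through a sync file. */
	drmSyncobjExportSyncFile(fd, handle, &sync_fd);
	drmSyncobjReset(fd, &handle, 1);    /* fence back to NULL */
	drmSyncobjImportSyncFile(fd, handle, sync_fd);
	close(sync_fd);

	drmSyncobjDestroy(fd, handle);
}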
57 * to an already signaled fence depending on whether the
63 * the syncobj, the syncobj's fence is replaced with a fence which will be
67 * struct &dma_fence_chain pointing to the DRM driver's fence and also
68 * pointing to the previous fence that was in the syncobj. The new struct
69 * &dma_fence_chain fence replaces the syncobj's fence and will be signaled by
71 * fence previously in the syncobj.
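A hedged driver-side sketch of that chain construction, assuming the driver already holds its hardware fence and a reference to the destination syncobj; the helper name is illustrative:

#include <linux/dma-fence-chain.h>
#include <drm/drm_syncobj.h>

/* Illustrative only: attach 'hw_fence' to 'syncobj' as timeline point 'point'. */
static int attach_timeline_point(struct drm_syncobj *syncobj,
				 struct dma_fence *hw_fence, u64 point)
{
	struct dma_fence_chain *chain = dma_fence_chain_alloc();

	if (!chain)
		return -ENOMEM;

	/* drm_syncobj_add_point() takes its own reference to hw_fence; the
	 * chain node becomes the syncobj's fence and signals once both
	 * hw_fence and the fence previously in the syncobj have signaled. */
	drm_syncobj_add_point(syncobj, chain, hw_fence, point);
	return 0;
}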
74 * time the work is enqueued, it waits on the syncobj's fence before
75 * submitting the work to hardware. That fence is either:
77 * - The syncobj's current fence if the syncobj is considered as a binary
82 * If the syncobj's fence is NULL or not present in the syncobj's timeline,
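A matching sketch of the wait side, assuming the submit ioctl carries a syncobj handle and an optional timeline point; lookup_in_fence is a hypothetical helper:

#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>

/* Illustrative only: resolve the fence a job must wait on before submission. */
static int lookup_in_fence(struct drm_file *file, u32 handle, u64 point,
			   struct dma_fence **out)
{
	/* With DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT this blocks until a
	 * fence for 'point' has been submitted; without it a missing fence
	 * is an error. The returned reference must be dma_fence_put(). */
	return drm_syncobj_find_fence(file, handle, point,
				      DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
				      out);
}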
85 * With a binary syncobj, all manipulation of the syncobj's fence happens in
86 * terms of the current fence at the time the ioctl is called by userspace
91 * setting its pointer to a fence which is already signaled.
93 * With a timeline syncobj, all manipulation of the syncobj's fence happens in
104 * syncobj's fence when signaling).
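A userspace sketch of manipulating timeline points, assuming the libdrm wrappers drmSyncobjTimelineSignal, drmSyncobjTimelineWait and drmSyncobjQuery; error handling omitted:

#include <stdint.h>
#include <xf86drm.h>

static void timeline_demo(int fd)
{
	uint32_t handle;
	uint64_t point = 3, last_signaled = 0;

	drmSyncobjCreate(fd, 0, &handle);

	/* Attach an already signaled stub fence as point 3 of the timeline. */
	drmSyncobjTimelineSignal(fd, &handle, &point, 1);

	/* Wait for point 3; returns immediately here since it is signaled. */
	drmSyncobjTimelineWait(fd, &handle, &point, 1, INT64_MAX, 0, NULL);

	/* Query the last signaled point (3 in this sketch). */
	drmSyncobjQuery(fd, &handle, &last_signaled, 1);

	drmSyncobjDestroy(fd, handle);
}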
114 * Otherwise, it returns once at least one syncobj fence has been signaled
115 * and the index of a signaled fence is written back to the client.
118 * fence in a syncobj, if &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is set,
120 * fence and then wait on that fence.
122 * syncobjs in the array has a NULL fence, -EINVAL will be returned.
123 * Assuming the syncobj starts off with a NULL fence, this allows a client
127 * This requirement is inherited from the Vulkan fence API.
134 * fence to materialize on the timeline without waiting for the fence to be
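A sketch of those wait modes from userspace, assuming two binary syncobj handles that other threads will eventually signal; error handling omitted:

#include <stdint.h>
#include <xf86drm.h>

static void wait_modes_demo(int fd, uint32_t handles[2])
{
	uint32_t first = 0;

	/* Default: return once *any* fence signals; 'first' receives the
	 * index of a signaled entry. Fails with -EINVAL if any syncobj
	 * still has a NULL fence. */
	drmSyncobjWait(fd, handles, 2, INT64_MAX, 0, &first);

	/* Wait for *all* fences, letting not-yet-submitted fences appear
	 * first. DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (timeline waits)
	 * instead returns once a fence exists for the point, without
	 * waiting for it to signal. */
	drmSyncobjWait(fd, handles, 2, INT64_MAX,
		       DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
		       DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT, NULL);
}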
167 * import/export the syncobj's current fence from/to a &sync_file.
169 * syncobj's fence at the time of export and any later signal or reset
171 * When a sync file is imported into a syncobj, the syncobj's fence is set
172 * to the fence wrapped by that sync file.
187 * point 0 to mean take/replace the fence in the syncobj.
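A userspace sketch of the export/import and transfer paths, assuming one binary and one timeline syncobj already exist; error handling omitted:

#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>

static void export_import_demo(int fd, uint32_t binary, uint32_t timeline)
{
	int sync_fd;

	/* Snapshot the binary syncobj's current fence into a sync file;
	 * later signals or resets of the syncobj do not affect the file. */
	drmSyncobjExportSyncFile(fd, binary, &sync_fd);

	/* Importing replaces the destination syncobj's fence with the
	 * fence wrapped by the sync file. */
	drmSyncobjImportSyncFile(fd, binary, sync_fd);
	close(sync_fd);

	/* Transfer the binary syncobj's fence (src point 0 = "the fence
	 * itself") to point 5 of the timeline syncobj. */
	drmSyncobjTransfer(fd, timeline, 5, binary, 0, 0);
}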
191 #include <linux/dma-fence-unwrap.h>
216 struct dma_fence *fence;
226 struct dma_fence *fence;
267 struct dma_fence *fence;
269 if (wait->fence)
273 /* We've already tried once to get a fence and failed. Now that we
275 * callback when a fence has already been set.
277 fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
278 if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
279 dma_fence_put(fence);
281 } else if (!fence) {
282 wait->fence = dma_fence_get_stub();
284 wait->fence = fence;
304 dma_fence_put(entry->fence);
328 * @fence: fence to encapsulate in the chain node
335 struct dma_fence *fence,
342 dma_fence_get(fence);
350 dma_fence_chain_init(chain, prev, fence, point);
351 rcu_assign_pointer(syncobj->fence, &chain->base);
360 dma_fence_chain_for_each(fence, prev);
366 * drm_syncobj_replace_fence - replace fence in a sync object.
367 * @syncobj: Sync object to replace fence in
368 * @fence: fence to install in the sync object.
370 * This replaces the fence on a sync object.
373 struct dma_fence *fence)
379 if (fence)
380 dma_fence_get(fence);
384 old_fence = rcu_dereference_protected(syncobj->fence,
386 rcu_assign_pointer(syncobj->fence, fence);
388 if (fence != old_fence) {
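A driver-side sketch of the usual drm_syncobj_replace_fence() call on a submit path, assuming the job's fence has already been created; the helper name is illustrative:

#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>

/* Illustrative only: publish 'job_fence' through a binary out-syncobj. */
static int install_out_fence(struct drm_file *file, u32 handle,
			     struct dma_fence *job_fence)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file, handle);

	if (!syncobj)
		return -ENOENT;

	/* Takes its own reference to job_fence and drops the reference to
	 * whatever fence the syncobj held before. */
	drm_syncobj_replace_fence(syncobj, job_fence);
	drm_syncobj_put(syncobj);
	return 0;
}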
402 * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
403 * @syncobj: sync object to assign the fence on
405 * Assign an already signaled stub fence to the sync object.
409 struct dma_fence *fence = dma_fence_allocate_private_stub(ktime_get());
411 if (!fence)
414 drm_syncobj_replace_fence(syncobj, fence);
415 dma_fence_put(fence);
422 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
427 * @fence: out parameter for the fence
432 * Returns 0 on success or a negative error value on failure. On success @fence
433 * contains a reference to the fence, which must be released by calling
438 struct dma_fence **fence)
457 *fence = drm_syncobj_fence_get(syncobj);
459 if (*fence) {
460 ret = dma_fence_chain_find_seqno(fence, point);
464 * fence. To make sure the recipient gets
465 * signalled, use a new fence instead.
467 if (!*fence)
468 *fence = dma_fence_get_stub();
472 dma_fence_put(*fence);
491 if (wait.fence) {
509 *fence = wait.fence;
547 * @fence: if non-NULL, the syncobj will represent this fence
556 struct dma_fence *fence)
578 if (fence)
579 drm_syncobj_replace_fence(syncobj, fence);
761 struct dma_fence *fence = sync_file_get_fence(fd);
764 if (!fence)
769 dma_fence_put(fence);
773 drm_syncobj_replace_fence(syncobj, fence);
774 dma_fence_put(fence);
783 struct dma_fence *fence;
790 ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
794 sync_file = sync_file_create(fence);
796 dma_fence_put(fence);
934 * added as timeline fence to a chain again.
981 struct dma_fence *fence;
990 &fence);
994 ret = drm_syncobj_flatten_chain(&fence);
1004 drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
1006 dma_fence_put(fence);
1018 struct dma_fence *fence;
1025 args->src_point, args->flags, &fence);
1028 drm_syncobj_replace_fence(binary_syncobj, fence);
1029 dma_fence_put(fence);
1056 static void syncobj_wait_fence_func(struct dma_fence *fence,
1068 struct dma_fence *fence;
1071 fence = rcu_dereference_protected(syncobj->fence,
1073 dma_fence_get(fence);
1074 if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
1075 dma_fence_put(fence);
1077 } else if (!fence) {
1078 wait->fence = dma_fence_get_stub();
1080 wait->fence = fence;
1095 struct dma_fence *fence;
1123 * a syncobj with a missing fence and then never have the chance of
1128 struct dma_fence *fence;
1136 fence = drm_syncobj_fence_get(syncobjs[i]);
1137 if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
1138 dma_fence_put(fence);
1148 if (fence)
1149 entries[i].fence = fence;
1151 entries[i].fence = dma_fence_get_stub();
1154 dma_fence_is_signaled(entries[i].fence)) {
1168 * fence is signaled prior to fence->ops->enable_signaling() being
1184 fence = entries[i].fence;
1185 if (!fence)
1189 dma_fence_is_signaled(fence) ||
1191 dma_fence_add_callback(fence,
1194 /* The fence has been signaled */
1228 dma_fence_remove_callback(entries[i].fence,
1230 dma_fence_put(entries[i].fence);
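The wait loop above builds on the generic dma_fence callback mechanism; a hedged standalone sketch of that pattern (struct and function names are illustrative):

#include <linux/dma-fence.h>
#include <linux/sched.h>

struct my_waiter {
	struct dma_fence_cb cb;
	struct task_struct *task;
};

/* Runs from the fence's signaling context, possibly in IRQ context. */
static void my_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct my_waiter *w = container_of(cb, struct my_waiter, cb);

	wake_up_process(w->task);
}

static void wait_sketch(struct dma_fence *fence)
{
	struct my_waiter w = { .task = current };

	/* Returns -ENOENT if the fence is already signaled, in which case
	 * the callback will never run (the "fence has been signaled" case
	 * in the syncobj wait loop). */
	if (dma_fence_add_callback(fence, &w.cb, my_fence_cb))
		return;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (dma_fence_is_signaled(fence))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	/* Harmless if the callback already fired. */
	dma_fence_remove_callback(fence, &w.cb);
}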
1430 static void syncobj_eventfd_entry_fence_func(struct dma_fence *fence,
1445 struct dma_fence *fence;
1448 fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
1449 if (!fence)
1452 ret = dma_fence_chain_find_seqno(&fence, entry->point);
1455 dma_fence_put(fence);
1457 } else if (!fence) {
1458 /* If dma_fence_chain_find_seqno returns 0 but sets the fence
1460 * later seqno has already been submitted. Assign a stub fence
1463 fence = dma_fence_get_stub();
1467 entry->fence = fence;
1473 ret = dma_fence_add_callback(fence, &entry->fence_cb,
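The three-way outcome that comment describes (error, NULL fence, real fence) is the general contract of dma_fence_chain_find_seqno(); a condensed sketch of the lookup pattern used throughout this file, with an illustrative helper name:

#include <linux/err.h>
#include <linux/dma-fence-chain.h>
#include <drm/drm_syncobj.h>

/* Illustrative only: resolve timeline 'point' of 'syncobj' into a fence.
 * Every non-error, non-NULL return carries a reference the caller must
 * release with dma_fence_put(). */
static struct dma_fence *resolve_point(struct drm_syncobj *syncobj, u64 point)
{
	struct dma_fence *fence = drm_syncobj_fence_get(syncobj);
	int ret;

	if (!fence)
		return NULL;			/* nothing submitted at all */

	ret = dma_fence_chain_find_seqno(&fence, point);
	if (ret) {
		dma_fence_put(fence);		/* 'point' not submitted yet */
		return ERR_PTR(ret);
	}
	if (!fence)
		return dma_fence_get_stub();	/* 'point' already signaled */

	return fence;				/* fence for 'point' */
}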
1662 struct dma_fence *fence = dma_fence_get_stub();
1665 fence, points[i]);
1666 dma_fence_put(fence);
1705 struct dma_fence *fence;
1708 fence = drm_syncobj_fence_get(syncobjs[i]);
1709 chain = to_dma_fence_chain(fence);
1712 dma_fence_get(fence);
1716 point = fence->seqno;
1718 dma_fence_chain_for_each(iter, fence) {
1719 if (iter->context != fence->context) {
1736 dma_fence_put(fence);