Lines Matching full:shared
51 list = kmem_alloc(offsetof(typeof(*list), shared[n]), KM_NOSLEEP); in objlist_tryalloc()
64 list = kmem_alloc(offsetof(typeof(*list), shared[n]), KM_SLEEP); in objlist_alloc()
75 kmem_free(list, offsetof(typeof(*list), shared[n])); in objlist_free()
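The three hits above size the shared-fence list with the flexible-array idiom offsetof(typeof(*list), shared[n]). A minimal standalone sketch of that sizing trick, with the struct modeled loosely on Linux's struct dma_resv_list; the names here are illustrative, not copied from the file:

    #include <stddef.h>
    #include <stdlib.h>

    struct fence;                        /* stand-in for struct dma_fence */

    struct objlist {                     /* modeled on struct dma_resv_list */
        unsigned      shared_max;
        unsigned      shared_count;
        struct fence *shared[];          /* flexible array member */
    };

    /*
     * Allocate room for n shared fence pointers.  offsetof(..., shared[n])
     * (header plus n pointers) is the same size expression the hits above
     * pass to kmem_alloc()/kmem_free(); the variable array index inside
     * offsetof is a GCC/Clang extension that kernel code relies on.
     */
    static struct objlist *
    objlist_alloc_sketch(unsigned n)
    {
        struct objlist *list = malloc(offsetof(struct objlist, shared[n]));

        if (list != NULL) {
            list->shared_max = n;
            list->shared_count = 0;
        }
        return list;
    }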
128 dma_fence_put(robj->fence->shared[i]); in dma_resv_fini()
129 robj->fence->shared[i] = NULL; /* paranoia */ in dma_resv_fini()
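The dma_resv_fini() hits drop whatever shared-fence references are still installed when the reservation object is destroyed. A hedged sketch of the surrounding lifecycle; struct myobj and its constructor/destructor are hypothetical, only dma_resv_init()/dma_resv_fini() are real entry points:

    #include <linux/dma-resv.h>          /* header as in the Linux compat layer */

    struct myobj {                       /* hypothetical driver object */
        struct dma_resv resv;
        /* ... driver state ... */
    };

    static void
    myobj_ctor(struct myobj *obj)
    {
        dma_resv_init(&obj->resv);
    }

    static void
    myobj_dtor(struct myobj *obj)
    {
        /*
         * Releases the exclusive fence and every remaining entry in
         * robj->fence->shared[], as the two hits above show, so the
         * caller only has to guarantee nobody else still uses obj.
         */
        dma_resv_fini(&obj->resv);
    }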
300 * Return a pointer to the shared fence list of the reservation
316 * Reserve space in robj to add num_fences shared fences. To be
462 * and length of the shared fence list of robj and return true, or
468 * shared list at the moment. Does not take any fence references.
533 * have previously loaded the shared fence list and should in dma_resv_get_excl_reader()
560 * Empty and release all of robj's shared fences, and clear and
584 /* If there are any shared fences, remember how many. */ in dma_resv_add_excl_fence()
591 /* Replace the fence and zero the shared count. */ in dma_resv_add_excl_fence()
605 /* Release any old shared fences. */ in dma_resv_add_excl_fence()
608 dma_fence_put(old_list->shared[old_shared_count]); in dma_resv_add_excl_fence()
610 old_list->shared[old_shared_count] = NULL; in dma_resv_add_excl_fence()
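The dma_resv_add_excl_fence() hits show that installing a new exclusive fence releases every previously installed shared fence. A hedged write-side caller; struct myobj and the wrapper are hypothetical, and the locking follows the usual dma_resv_lock()/dma_resv_unlock() contract:

    #include <linux/dma-resv.h>

    struct myobj {                       /* hypothetical driver object */
        struct dma_resv resv;
    };

    static int
    myobj_attach_write_fence(struct myobj *obj, struct dma_fence *fence)
    {
        int ret;

        ret = dma_resv_lock(&obj->resv, NULL);
        if (ret)
            return ret;

        /*
         * Takes its own reference to fence, replaces the old exclusive
         * fence, and releases the old shared fences -- the loop over
         * old_list->shared[old_shared_count] in the hits above.
         */
        dma_resv_add_excl_fence(&obj->resv, fence);

        dma_resv_unlock(&obj->resv);
        return 0;
    }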
618 * Acquire a reference to fence and add it to robj's shared list.
623 * call to dma_resv_reserve_shared for each shared fence
656 if (list->shared[i]->context == fence->context) { in dma_resv_add_shared_fence()
657 replace = list->shared[i]; in dma_resv_add_shared_fence()
658 atomic_store_relaxed(&list->shared[i], fence); in dma_resv_add_shared_fence()
665 atomic_store_relaxed(&list->shared[list->shared_count], in dma_resv_add_shared_fence()
690 list->shared[i]->context == fence->context) { in dma_resv_add_shared_fence()
691 replace = list->shared[i]; in dma_resv_add_shared_fence()
692 prealloc->shared[i] = fence; in dma_resv_add_shared_fence()
694 prealloc->shared[i] = list->shared[i]; in dma_resv_add_shared_fence()
702 prealloc->shared[prealloc->shared_count++] = fence; in dma_resv_add_shared_fence()
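These hits show dma_resv_add_shared_fence() either replacing the entry with the same fence context or appending into space set aside earlier, which is why the doc fragment above requires a dma_resv_reserve_shared() call for each shared fence to be added. A hedged sketch of that reserve-then-add pattern (struct myobj and the wrapper are hypothetical):

    /* struct myobj: hypothetical object embedding struct dma_resv resv. */
    static int
    myobj_attach_read_fence(struct myobj *obj, struct dma_fence *fence)
    {
        int ret;

        ret = dma_resv_lock(&obj->resv, NULL);
        if (ret)
            return ret;

        /* Guarantee room for one more entry in list->shared[]. */
        ret = dma_resv_reserve_shared(&obj->resv, 1);
        if (ret)
            goto out;

        /*
         * Takes a reference to fence; either replaces the entry whose
         * context matches fence->context or appends at
         * list->shared[list->shared_count], as the hits above show.
         */
        dma_resv_add_shared_fence(&obj->resv, fence);
    out:
        dma_resv_unlock(&obj->resv);
        return ret;
    }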
754 * Get a snapshot of the exclusive and shared fences of robj. The
755 * shared fences are returned as a pointer *sharedp to an array,
770 struct dma_fence **shared = NULL; in dma_resv_get_fences_rcu() local
780 /* If there is a shared list, grab it. */ in dma_resv_get_fences_rcu()
797 if (shared == NULL) { in dma_resv_get_fences_rcu()
803 shared = kcalloc(shared_alloc, sizeof(shared[0]), in dma_resv_get_fences_rcu()
805 if (shared == NULL) { in dma_resv_get_fences_rcu()
812 shared = kcalloc(shared_alloc, in dma_resv_get_fences_rcu()
813 sizeof(shared[0]), GFP_KERNEL); in dma_resv_get_fences_rcu()
814 if (shared == NULL) in dma_resv_get_fences_rcu()
826 kfree(shared); in dma_resv_get_fences_rcu()
827 shared = kcalloc(shared_alloc, sizeof(shared[0]), in dma_resv_get_fences_rcu()
829 if (shared == NULL) in dma_resv_get_fences_rcu()
842 shared[i] = atomic_load_relaxed(&list->shared[i]); in dma_resv_get_fences_rcu()
855 * Try to get a reference to all of the shared fences. in dma_resv_get_fences_rcu()
858 if (dma_fence_get_rcu(atomic_load_relaxed(&shared[i])) == NULL) in dma_resv_get_fences_rcu()
871 shared[shared_count++] = fence; in dma_resv_get_fences_rcu()
873 shared = kmalloc(sizeof(shared[0]), GFP_KERNEL); in dma_resv_get_fences_rcu()
874 shared[0] = fence; in dma_resv_get_fences_rcu()
879 *sharedp = shared; in dma_resv_get_fences_rcu()
885 dma_fence_put(shared[i]); in dma_resv_get_fences_rcu()
886 shared[i] = NULL; /* paranoia */ in dma_resv_get_fences_rcu()
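dma_resv_get_fences_rcu() hands back a referenced exclusive fence plus a kcalloc()'d array of referenced shared fences, both owned by the caller afterwards. A hedged example of consuming that snapshot; the wait-for-idle policy and struct myobj are illustrative:

    #include <linux/dma-resv.h>
    #include <linux/slab.h>

    struct myobj {                       /* hypothetical driver object */
        struct dma_resv resv;
    };

    static int
    myobj_wait_idle(struct myobj *obj)
    {
        struct dma_fence *excl = NULL, **shared = NULL;
        unsigned shared_count = 0, i;
        int ret;

        /* Snapshot every fence without taking the reservation lock. */
        ret = dma_resv_get_fences_rcu(&obj->resv, &excl, &shared_count,
            &shared);
        if (ret)
            return ret;

        for (i = 0; i < shared_count; i++) {
            dma_fence_wait(shared[i], /*intr*/false);
            dma_fence_put(shared[i]);
        }
        if (excl != NULL) {
            dma_fence_wait(excl, /*intr*/false);
            dma_fence_put(excl);
        }
        kfree(shared);                   /* the array belongs to the caller */
        return 0;
    }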
902 * Copy the exclusive fence and all the shared fences from src to
928 /* Get the shared list. */ in dma_resv_copy_fences()
951 fence = atomic_load_relaxed(&src_list->shared[i]); in dma_resv_copy_fences()
959 dst_list->shared[dst_list->shared_count++] = fence; in dma_resv_copy_fences()
977 * We now have a snapshot of the shared and exclusive fences of in dma_resv_copy_fences()
983 /* Get the old shared and exclusive fences, if any. */ in dma_resv_copy_fences()
1006 /* Release any old shared fences. */ in dma_resv_copy_fences()
1009 dma_fence_put(old_list->shared[i]); in dma_resv_copy_fences()
1010 old_list->shared[i] = NULL; /* paranoia */ in dma_resv_copy_fences()
1024 dma_fence_put(dst_list->shared[i]); in dma_resv_copy_fences()
1025 dst_list->shared[i] = NULL; /* paranoia */ in dma_resv_copy_fences()
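dma_resv_copy_fences() snapshots src's exclusive and shared fences and installs them on dst, putting dst's old shared fences as the hits above show. A hedged caller; the lock-dst-only convention mirrors the Linux API and struct myobj is hypothetical:

    /* struct myobj: hypothetical object embedding struct dma_resv resv. */
    static int
    myobj_clone_fences(struct myobj *dst, struct myobj *src)
    {
        int ret;

        /* dst must be held; src is read via the RCU/seqlock path. */
        ret = dma_resv_lock(&dst->resv, NULL);
        if (ret)
            return ret;

        ret = dma_resv_copy_fences(&dst->resv, &src->resv);

        dma_resv_unlock(&dst->resv);
        return ret;
    }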
1033 * dma_resv_test_signaled_rcu(robj, shared)
1035 * If shared is true, test whether all of the shared fences are
1037 * fence is signalled. If shared is false, test only whether the
1040 * XXX Why does this _not_ test the exclusive fence if shared is
1041 * true only if there are no shared fences? This makes no sense.
1045 bool shared) in dma_resv_test_signaled_rcu() argument
1059 /* If shared is requested and there is a shared list, test it. */ in dma_resv_test_signaled_rcu()
1060 if (shared) { in dma_resv_test_signaled_rcu()
1077 fence = atomic_load_relaxed(&list->shared[i]); in dma_resv_test_signaled_rcu()
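Here the boolean parameter is literally named shared (what Linux callers usually pass as test_all): true tests the shared list, false only the exclusive fence, with the quirk flagged in the XXX comment above. A trivial hedged wrapper:

    /* struct myobj: hypothetical object embedding struct dma_resv resv. */
    static bool
    myobj_is_idle(struct myobj *obj, bool wait_all)
    {
        /*
         * wait_all=true: test the shared fences (and, per the XXX above,
         * the exclusive fence only when no shared fences are installed);
         * wait_all=false: test only the exclusive fence.
         */
        return dma_resv_test_signaled_rcu(&obj->resv, wait_all);
    }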
1118 * dma_resv_wait_timeout_rcu(robj, shared, intr, timeout)
1120 * If shared is true, wait for all of the shared fences to be
1122 * to be signalled. If shared is false, wait only for the
1126 * XXX Why does this _not_ wait for the exclusive fence if shared
1127 * is true only if there are no shared fences? This makes no
1132 bool shared, bool intr, unsigned long timeout) in dma_resv_wait_timeout_rcu() argument
1141 return dma_resv_test_signaled_rcu(robj, shared); in dma_resv_wait_timeout_rcu()
1149 /* If shared is requested and there is a shared list, wait on it. */ in dma_resv_wait_timeout_rcu()
1150 if (shared) { in dma_resv_wait_timeout_rcu()
1167 fence = atomic_load_relaxed(&list->shared[i]); in dma_resv_wait_timeout_rcu()
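dma_resv_wait_timeout_rcu() is the blocking counterpart and, assuming it keeps the Linux convention, returns a negative errno, 0 on timeout, or the time remaining. A hedged wrapper; the -ETIMEDOUT policy is illustrative:

    /* struct myobj: hypothetical object embedding struct dma_resv resv. */
    static int
    myobj_wait(struct myobj *obj, bool wait_all, unsigned long timeout)
    {
        long ret;

        ret = dma_resv_wait_timeout_rcu(&obj->resv, wait_all,
            /*intr*/true, timeout);
        if (ret < 0)
            return ret;                  /* interrupted or error */
        if (ret == 0)
            return -ETIMEDOUT;           /* illustrative policy */
        return 0;                        /* fences signalled in time */
    }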
1259 * spuriously notify them about a shared fence, tough.
1279 * - POLLOUT wait for all fences shared and exclusive
1315 /* If we want to wait for all fences, get the shared list. */ in dma_resv_do_poll()
1333 fence = atomic_load_relaxed(&list->shared[i]); in dma_resv_do_poll()
1345 /* If all shared fences have been signalled, move on. */ in dma_resv_do_poll()
1372 fence = atomic_load_relaxed(&list->shared[i]); in dma_resv_do_poll()
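The dma_resv_do_poll() hits implement the dma-buf poll contract hinted at in the comment fragment above: POLLOUT only fires once all fences, shared and exclusive, have signalled. A hedged userspace illustration of what that means for a process holding a dma-buf file descriptor (where the fd comes from is outside the scope of this sketch):

    #include <poll.h>

    /*
     * Hypothetical consumer: before writing into a buffer imported as a
     * dma-buf, wait until every fence -- shared and exclusive -- has
     * signalled, which is what POLLOUT maps to in the hits above.
     */
    static int
    wait_for_write_access(int dmabuf_fd)
    {
        struct pollfd pfd = { .fd = dmabuf_fd, .events = POLLOUT };

        return poll(&pfd, 1, -1);        /* -1: block indefinitely */
    }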