Lines matching defs:ref in i915_active.c (each entry is the source line number followed by the matching line):
29 struct i915_active *ref;
77 struct i915_active *ref = addr;
79 return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
87 static void debug_active_init(struct i915_active *ref)
89 debug_object_init(ref, &active_debug_desc);
92 static void debug_active_activate(struct i915_active *ref)
94 lockdep_assert_held(&ref->tree_lock);
95 debug_object_activate(ref, &active_debug_desc);
98 static void debug_active_deactivate(struct i915_active *ref)
100 lockdep_assert_held(&ref->tree_lock);
101 if (!atomic_read(&ref->count)) /* after the last dec */
102 debug_object_deactivate(ref, &active_debug_desc);
105 static void debug_active_fini(struct i915_active *ref)
107 debug_object_free(ref, &active_debug_desc);
110 static void debug_active_assert(struct i915_active *ref)
112 debug_object_assert_init(ref, &active_debug_desc);
117 static inline void debug_active_init(struct i915_active *ref) { }
118 static inline void debug_active_activate(struct i915_active *ref) { }
119 static inline void debug_active_deactivate(struct i915_active *ref) { }
120 static inline void debug_active_fini(struct i915_active *ref) { }
121 static inline void debug_active_assert(struct i915_active *ref) { }
126 __active_retire(struct i915_active *ref)
132 GEM_BUG_ON(i915_active_is_idle(ref));
135 if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
138 GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
139 debug_active_deactivate(ref);
142 if (!ref->cache)
143 ref->cache = fetch_node(ref->tree.rb_node);
146 if (ref->cache) {
148 rb_erase(&ref->cache->node, &ref->tree);
149 root = ref->tree;
152 rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node);
153 rb_insert_color(&ref->cache->node, &ref->tree);
154 GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);
157 ref->cache->timeline = 0; /* needs cmpxchg(u64) */
160 spin_unlock_irqrestore(&ref->tree_lock, flags);
163 if (ref->retire)
164 ref->retire(ref);
167 wake_up_var(ref);
183 struct i915_active *ref = container_of(wrk, typeof(*ref), work);
185 GEM_BUG_ON(!atomic_read(&ref->count));
186 if (atomic_add_unless(&ref->count, -1, 1))
189 __active_retire(ref);
193 active_retire(struct i915_active *ref)
195 GEM_BUG_ON(!atomic_read(&ref->count));
196 if (atomic_add_unless(&ref->count, -1, 1))
199 if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
200 queue_work(system_unbound_wq, &ref->work);
204 __active_retire(ref);
226 active_retire(container_of(cb, struct active_node, base.cb)->ref);
236 static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
249 it = READ_ONCE(ref->cache);
274 GEM_BUG_ON(i915_active_is_idle(ref));
276 it = fetch_node(ref->tree.rb_node);
283 WRITE_ONCE(ref->cache, it);
293 active_instance(struct i915_active *ref, u64 idx)
298 node = __active_lookup(ref, idx);
302 spin_lock_irq(&ref->tree_lock);
303 GEM_BUG_ON(i915_active_is_idle(ref));
306 p = &ref->tree.rb_node;
333 node->ref = ref;
337 rb_insert_color(&node->node, &ref->tree);
340 WRITE_ONCE(ref->cache, node);
341 spin_unlock_irq(&ref->tree_lock);
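The lookup/insert excerpted above is the standard kernel rbtree descend-and-link idiom, keyed by the timeline index and performed under ref->tree_lock. A condensed, hypothetical sketch of that idiom (sketch_lookup_or_insert is an invented name, not the file's code):

/*
 * Hypothetical sketch of the descend-and-link idiom behind
 * active_instance(): walk ref->tree keyed by the timeline index and,
 * if no node exists for idx, link a preallocated one where the walk
 * ended.  Caller holds ref->tree_lock, as the spin_lock_irq() above shows.
 */
static struct active_node *
sketch_lookup_or_insert(struct i915_active *ref, u64 idx,
			struct active_node *node)
{
	struct rb_node **p = &ref->tree.rb_node, *parent = NULL;

	while (*p) {
		struct active_node *it;

		parent = *p;
		it = rb_entry(parent, struct active_node, node);
		if (it->timeline == idx)
			return it;	/* reuse the existing node */

		if (it->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	node->timeline = idx;
	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);
	return node;
}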
346 void __i915_active_init(struct i915_active *ref,
347 int (*active)(struct i915_active *ref),
348 void (*retire)(struct i915_active *ref),
353 debug_active_init(ref);
355 ref->flags = flags;
356 ref->active = active;
357 ref->retire = retire;
359 mtx_init(&ref->tree_lock, IPL_TTY);
360 ref->tree = RB_ROOT;
361 ref->cache = NULL;
363 init_llist_head(&ref->preallocated_barriers);
364 atomic_set(&ref->count, 0);
366 __mutex_init(&ref->mutex, "i915_active", mkey);
368 rw_init(&ref->mutex, "i915_active");
370 __i915_active_fence_init(&ref->excl, NULL, excl_retire);
371 INIT_WORK(&ref->work, active_work);
373 lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
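The constructor above wires the optional active/retire callbacks, the tree lock, the mutex and the retirement worker. As a hedged illustration (struct foo and the foo_* functions are hypothetical), a caller embedding a tracker would typically go through the i915_active_init() wrapper macro in i915_active.h, which supplies the two lockdep keys that __i915_active_init() takes:

struct foo {
	struct i915_active active;
};

static int foo_active(struct i915_active *ref)
{
	/* first activation: e.g. take a wakeref or pin backing storage */
	return 0;
}

static void foo_retire(struct i915_active *ref)
{
	/* last reference retired: undo whatever foo_active() did */
}

static void foo_init(struct foo *foo)
{
	/*
	 * Passing I915_ACTIVE_RETIRE_SLEEPS instead of 0 defers retirement
	 * to system_unbound_wq (see active_retire() above) so foo_retire()
	 * may sleep.
	 */
	i915_active_init(&foo->active, foo_active, foo_retire, 0);
}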
377 static bool ____active_del_barrier(struct i915_active *ref,
420 __active_del_barrier(struct i915_active *ref, struct active_node *node)
422 return ____active_del_barrier(ref, node, barrier_to_engine(node));
426 replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
436 return __active_del_barrier(ref, node_from_active(active));
439 int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
447 err = i915_active_acquire(ref);
452 active = active_instance(ref, idx);
458 if (replace_barrier(ref, active)) {
460 atomic_dec(&ref->count);
466 __i915_active_acquire(ref);
471 i915_active_release(ref);
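As the lines above show, i915_active_add_request() acquires the tracker, picks (or creates) the per-timeline node for the request, and releases again, so a caller only needs a single call while the request is being constructed. A minimal hypothetical sketch, reusing struct foo from the earlier sketch:

/* Record an in-flight request; the tracker stays busy until it retires. */
static int foo_track(struct foo *foo, struct i915_request *rq)
{
	return i915_active_add_request(&foo->active, rq);
}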
476 __i915_active_set_fence(struct i915_active *ref,
482 if (replace_barrier(ref, active)) {
489 __i915_active_acquire(ref);
495 i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
498 return __i915_active_set_fence(ref, &ref->excl, f);
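i915_active_set_exclusive() tracks a single fence in the dedicated exclusive slot (ref->excl). A hedged sketch, assuming (as the driver's own callers do) that the previous exclusive fence is returned with a reference the caller must drop:

static void foo_set_exclusive(struct foo *foo, struct dma_fence *fence)
{
	struct dma_fence *prev;

	/* The old exclusive fence, if any, comes back with a reference. */
	prev = i915_active_set_exclusive(&foo->active, fence);
	if (prev)
		dma_fence_put(prev);
}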
501 bool i915_active_acquire_if_busy(struct i915_active *ref)
503 debug_active_assert(ref);
504 return atomic_add_unless(&ref->count, 1, 0);
507 static void __i915_active_activate(struct i915_active *ref)
509 spin_lock_irq(&ref->tree_lock); /* __active_retire() */
510 if (!atomic_fetch_inc(&ref->count))
511 debug_active_activate(ref);
512 spin_unlock_irq(&ref->tree_lock);
515 int i915_active_acquire(struct i915_active *ref)
519 if (i915_active_acquire_if_busy(ref))
522 if (!ref->active) {
523 __i915_active_activate(ref);
527 err = mutex_lock_interruptible(&ref->mutex);
531 if (likely(!i915_active_acquire_if_busy(ref))) {
532 err = ref->active(ref);
534 __i915_active_activate(ref);
537 mutex_unlock(&ref->mutex);
542 int i915_active_acquire_for_context(struct i915_active *ref, u64 idx)
547 err = i915_active_acquire(ref);
551 active = active_instance(ref, idx);
553 i915_active_release(ref);
557 return 0; /* return with active ref */
560 void i915_active_release(struct i915_active *ref)
562 debug_active_assert(ref);
563 active_retire(ref);
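i915_active_acquire() takes the first long-term reference, running the optional ref->active() callback under ref->mutex as shown above; i915_active_release() drops it, and once the count reaches zero active_retire() runs (or queues) the retirement. A hypothetical sketch of the usual bracket:

static int foo_do_work(struct foo *foo, struct i915_request *rq)
{
	int err;

	/* First acquire runs foo_active() and marks the tracker busy. */
	err = i915_active_acquire(&foo->active);
	if (err)
		return err;

	err = i915_active_add_request(&foo->active, rq);

	/* Drop our reference; retirement follows once all fences signal. */
	i915_active_release(&foo->active);
	return err;
}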
596 static int flush_lazy_signals(struct i915_active *ref)
601 enable_signaling(&ref->excl);
602 rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
613 int __i915_active_wait(struct i915_active *ref, int state)
618 if (i915_active_acquire_if_busy(ref)) {
621 err = flush_lazy_signals(ref);
622 i915_active_release(ref);
626 if (___wait_var_event(ref, i915_active_is_idle(ref),
635 flush_work(&ref->work);
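__i915_active_wait() flushes lazy signaling, waits for the tracker to idle, and then flushes the retirement worker. Callers normally use a wrapper; a sketch assuming the i915_active_wait() inline from i915_active.h, which passes TASK_INTERRUPTIBLE:

/* Block (interruptibly) until everything tracked has signalled and retired. */
static int foo_sync(struct foo *foo)
{
	return i915_active_wait(&foo->active);
}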
663 struct i915_active *ref;
671 if (i915_active_is_idle(wb->ref)) {
680 static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence)
688 GEM_BUG_ON(i915_active_is_idle(ref));
697 wb->ref = ref;
699 add_wait_queue(__var_waitqueue(ref), &wb->base);
703 static int await_active(struct i915_active *ref,
710 if (!i915_active_acquire_if_busy(ref))
714 rcu_access_pointer(ref->excl.fence)) {
715 err = __await_active(&ref->excl, fn, arg);
723 rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
731 err = flush_lazy_signals(ref);
735 err = __await_barrier(ref, barrier);
741 i915_active_release(ref);
751 struct i915_active *ref,
754 return await_active(ref, flags, rq_await_fence, rq, &rq->submit);
764 struct i915_active *ref,
767 return await_active(ref, flags, sw_await_fence, fence, fence);
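The two wrappers above let either a new request or an i915_sw_fence wait on whatever the tracker currently holds. A hypothetical sketch using the I915_ACTIVE_AWAIT_* flags declared in i915_active.h:

/* Order a new request after the exclusive fence and all timeline fences. */
static int foo_await(struct i915_request *rq, struct foo *foo)
{
	return i915_request_await_active(rq, &foo->active,
					 I915_ACTIVE_AWAIT_EXCL |
					 I915_ACTIVE_AWAIT_ACTIVE);
}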
770 void i915_active_fini(struct i915_active *ref)
772 debug_active_fini(ref);
773 GEM_BUG_ON(atomic_read(&ref->count));
774 GEM_BUG_ON(work_pending(&ref->work));
775 mutex_destroy(&ref->mutex);
777 if (ref->cache)
779 kmem_cache_free(slab_cache, ref->cache);
781 pool_put(&slab_cache, ref->cache);
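i915_active_fini() asserts the tracker is idle, destroys the mutex and frees the cached node (kmem_cache_free() on Linux, pool_put() in the OpenBSD port). A trivial hypothetical teardown sketch:

static void foo_fini(struct foo *foo)
{
	/* Must only be called once the tracker is idle (count == 0). */
	i915_active_fini(&foo->active);
}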
790 static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
794 if (RB_EMPTY_ROOT(&ref->tree))
797 GEM_BUG_ON(i915_active_is_idle(ref));
806 if (ref->cache && is_idle_barrier(ref->cache, idx)) {
807 p = &ref->cache->node;
812 p = ref->tree.rb_node;
857 ____active_del_barrier(ref, node, engine))
864 spin_lock_irq(&ref->tree_lock);
865 rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
866 if (p == &ref->cache->node)
867 WRITE_ONCE(ref->cache, NULL);
868 spin_unlock_irq(&ref->tree_lock);
873 int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
880 GEM_BUG_ON(i915_active_is_idle(ref));
883 while (!llist_empty(&ref->preallocated_barriers))
899 node = reuse_idle_barrier(ref, idx);
913 node->ref = ref;
928 __i915_active_acquire(ref);
940 GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
941 llist_add_batch(first, last, &ref->preallocated_barriers);
951 atomic_dec(&ref->count);
963 void i915_active_acquire_barrier(struct i915_active *ref)
968 GEM_BUG_ON(i915_active_is_idle(ref));
976 llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
981 spin_lock_irqsave_nested(&ref->tree_lock, flags,
984 p = &ref->tree.rb_node;
997 rb_insert_color(&node->node, &ref->tree);
998 spin_unlock_irqrestore(&ref->tree_lock, flags);
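The barrier pair above first reserves (or reuses) an idle node per engine and then splices the preallocated barriers into the tracker's tree. A hedged sketch of that two-step sequence (foo_add_barrier is hypothetical):

static int foo_add_barrier(struct foo *foo, struct intel_engine_cs *engine)
{
	int err;

	/* Reserve (or reuse) an idle barrier node for this engine. */
	err = i915_active_acquire_preallocate_barrier(&foo->active, engine);
	if (err)
		return err;

	/* Commit the preallocated barrier into foo->active's tree. */
	i915_active_acquire_barrier(&foo->active);
	return 0;
}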
1165 struct kref ref;
1168 struct i915_active *i915_active_get(struct i915_active *ref)
1170 struct auto_active *aa = container_of(ref, typeof(*aa), base);
1172 kref_get(&aa->ref);
1176 static void auto_release(struct kref *ref)
1178 struct auto_active *aa = container_of(ref, typeof(*aa), ref);
1184 void i915_active_put(struct i915_active *ref)
1186 struct auto_active *aa = container_of(ref, typeof(*aa), base);
1188 kref_put(&aa->ref, auto_release);
1191 static int auto_active(struct i915_active *ref)
1193 i915_active_get(ref);
1197 static void auto_retire(struct i915_active *ref)
1199 i915_active_put(ref);
1210 kref_init(&aa->ref);
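The tail of the file (the struct kref member and the auto_* callbacks above) implements a self-managed variant: i915_active_create(), to which the kref_init() line belongs, allocates a tracker whose auto_active()/auto_retire() callbacks take and drop the embedded kref, so its lifetime follows i915_active_get()/i915_active_put(). A brief hypothetical sketch:

static void foo_use_tracker(void)
{
	struct i915_active *active;

	active = i915_active_create();
	if (!active)
		return;

	/* ... use 'active' like any embedded tracker ... */

	/* Drop the creation reference; auto_release() frees it at zero. */
	i915_active_put(active);
}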