Lines matching refs:ref (references to the identifier `ref` in drivers/gpu/drm/i915/i915_active.c; each entry gives the source line, the matching code, and the enclosing function with the usage tag member/local/argument)

29 	struct i915_active *ref;  member
77 struct i915_active *ref = addr; in active_debug_hint() local
79 return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref; in active_debug_hint()
87 static void debug_active_init(struct i915_active *ref) in debug_active_init() argument
89 debug_object_init(ref, &active_debug_desc); in debug_active_init()
92 static void debug_active_activate(struct i915_active *ref) in debug_active_activate() argument
94 lockdep_assert_held(&ref->tree_lock); in debug_active_activate()
95 debug_object_activate(ref, &active_debug_desc); in debug_active_activate()
98 static void debug_active_deactivate(struct i915_active *ref) in debug_active_deactivate() argument
100 lockdep_assert_held(&ref->tree_lock); in debug_active_deactivate()
101 if (!atomic_read(&ref->count)) /* after the last dec */ in debug_active_deactivate()
102 debug_object_deactivate(ref, &active_debug_desc); in debug_active_deactivate()
105 static void debug_active_fini(struct i915_active *ref) in debug_active_fini() argument
107 debug_object_free(ref, &active_debug_desc); in debug_active_fini()
110 static void debug_active_assert(struct i915_active *ref) in debug_active_assert() argument
112 debug_object_assert_init(ref, &active_debug_desc); in debug_active_assert()
117 static inline void debug_active_init(struct i915_active *ref) { } in debug_active_init() argument
118 static inline void debug_active_activate(struct i915_active *ref) { } in debug_active_activate() argument
119 static inline void debug_active_deactivate(struct i915_active *ref) { } in debug_active_deactivate() argument
120 static inline void debug_active_fini(struct i915_active *ref) { } in debug_active_fini() argument
121 static inline void debug_active_assert(struct i915_active *ref) { } in debug_active_assert() argument
126 __active_retire(struct i915_active *ref) in __active_retire() argument
132 GEM_BUG_ON(i915_active_is_idle(ref)); in __active_retire()
135 if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags)) in __active_retire()
138 GEM_BUG_ON(rcu_access_pointer(ref->excl.fence)); in __active_retire()
139 debug_active_deactivate(ref); in __active_retire()
142 if (!ref->cache) in __active_retire()
143 ref->cache = fetch_node(ref->tree.rb_node); in __active_retire()
146 if (ref->cache) { in __active_retire()
148 rb_erase(&ref->cache->node, &ref->tree); in __active_retire()
149 root = ref->tree; in __active_retire()
152 rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node); in __active_retire()
153 rb_insert_color(&ref->cache->node, &ref->tree); in __active_retire()
154 GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node); in __active_retire()
157 ref->cache->timeline = 0; /* needs cmpxchg(u64) */ in __active_retire()
160 spin_unlock_irqrestore(&ref->tree_lock, flags); in __active_retire()
163 if (ref->retire) in __active_retire()
164 ref->retire(ref); in __active_retire()
167 wake_up_var(ref); in __active_retire()
179 struct i915_active *ref = container_of(wrk, typeof(*ref), work); in active_work() local
181 GEM_BUG_ON(!atomic_read(&ref->count)); in active_work()
182 if (atomic_add_unless(&ref->count, -1, 1)) in active_work()
185 __active_retire(ref); in active_work()
189 active_retire(struct i915_active *ref) in active_retire() argument
191 GEM_BUG_ON(!atomic_read(&ref->count)); in active_retire()
192 if (atomic_add_unless(&ref->count, -1, 1)) in active_retire()
195 if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) { in active_retire()
196 queue_work(system_unbound_wq, &ref->work); in active_retire()
200 __active_retire(ref); in active_retire()
222 active_retire(container_of(cb, struct active_node, base.cb)->ref); in node_retire()
232 static struct active_node *__active_lookup(struct i915_active *ref, u64 idx) in __active_lookup() argument
245 it = READ_ONCE(ref->cache); in __active_lookup()
270 GEM_BUG_ON(i915_active_is_idle(ref)); in __active_lookup()
272 it = fetch_node(ref->tree.rb_node); in __active_lookup()
279 WRITE_ONCE(ref->cache, it); in __active_lookup()
289 active_instance(struct i915_active *ref, u64 idx) in active_instance() argument
294 node = __active_lookup(ref, idx); in active_instance()
298 spin_lock_irq(&ref->tree_lock); in active_instance()
299 GEM_BUG_ON(i915_active_is_idle(ref)); in active_instance()
302 p = &ref->tree.rb_node; in active_instance()
325 node->ref = ref; in active_instance()
329 rb_insert_color(&node->node, &ref->tree); in active_instance()
332 WRITE_ONCE(ref->cache, node); in active_instance()
333 spin_unlock_irq(&ref->tree_lock); in active_instance()
338 void __i915_active_init(struct i915_active *ref, in __i915_active_init() argument
339 int (*active)(struct i915_active *ref), in __i915_active_init() argument
340 void (*retire)(struct i915_active *ref), in __i915_active_init() argument
345 debug_active_init(ref); in __i915_active_init()
347 ref->flags = flags; in __i915_active_init()
348 ref->active = active; in __i915_active_init()
349 ref->retire = retire; in __i915_active_init()
351 spin_lock_init(&ref->tree_lock); in __i915_active_init()
352 ref->tree = RB_ROOT; in __i915_active_init()
353 ref->cache = NULL; in __i915_active_init()
355 init_llist_head(&ref->preallocated_barriers); in __i915_active_init()
356 atomic_set(&ref->count, 0); in __i915_active_init()
357 __mutex_init(&ref->mutex, "i915_active", mkey); in __i915_active_init()
358 __i915_active_fence_init(&ref->excl, NULL, excl_retire); in __i915_active_init()
359 INIT_WORK(&ref->work, active_work); in __i915_active_init()
361 lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0); in __i915_active_init()
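__i915_active_init() is normally reached through the i915_active_init() macro in i915_active.h, which (in trees matching this listing) supplies the two static lock_class_key arguments: mkey for ref->mutex and wkey for the retire worker's lockdep map. A minimal usage sketch, assuming a hypothetical container `struct foo`; only i915_active_init(), the active()/retire() callback roles and the I915_ACTIVE_RETIRE_SLEEPS flag are taken from the real API, everything named foo_* is illustrative:

#include <linux/kref.h>
#include <linux/slab.h>

#include "i915_active.h"

struct foo {				/* illustrative container, not i915 API */
	struct kref kref;
	struct i915_active active;
};

static void foo_release(struct kref *kref)
{
	kfree(container_of(kref, struct foo, kref));
}

static int foo_active(struct i915_active *ref)
{
	/* first reference taken (count 0 -> 1): pin the container */
	kref_get(&container_of(ref, struct foo, active)->kref);
	return 0;
}

static void foo_retire(struct i915_active *ref)
{
	/* last tracked fence retired: unpin the container */
	kref_put(&container_of(ref, struct foo, active)->kref, foo_release);
}

static void foo_init(struct foo *foo)
{
	kref_init(&foo->kref);
	/*
	 * I915_ACTIVE_RETIRE_SLEEPS defers the final reference drop to
	 * system_unbound_wq (source lines 195-196), so foo_retire() is
	 * allowed to sleep.
	 */
	i915_active_init(&foo->active, foo_active, foo_retire,
			 I915_ACTIVE_RETIRE_SLEEPS);
}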
365 static bool ____active_del_barrier(struct i915_active *ref, in ____active_del_barrier() argument
408 __active_del_barrier(struct i915_active *ref, struct active_node *node) in __active_del_barrier() argument
410 return ____active_del_barrier(ref, node, barrier_to_engine(node)); in __active_del_barrier()
414 replace_barrier(struct i915_active *ref, struct i915_active_fence *active) in replace_barrier() argument
424 return __active_del_barrier(ref, node_from_active(active)); in replace_barrier()
427 int i915_active_add_request(struct i915_active *ref, struct i915_request *rq) in i915_active_add_request() argument
435 err = i915_active_acquire(ref); in i915_active_add_request()
440 active = active_instance(ref, idx); in i915_active_add_request()
446 if (replace_barrier(ref, active)) { in i915_active_add_request()
448 atomic_dec(&ref->count); in i915_active_add_request()
454 __i915_active_acquire(ref); in i915_active_add_request()
459 i915_active_release(ref); in i915_active_add_request()
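i915_active_add_request() is the usual entry point for tracking: it acquires the struct (line 435), looks up or allocates the per-timeline node for the request's timeline (line 440), records the request's fence, and releases again (line 459), keeping the i915_active busy until that request retires. A minimal caller sketch, reusing the hypothetical struct foo from the previous example:

static int foo_move_to_active(struct foo *foo, struct i915_request *rq)
{
	/*
	 * Record rq against foo->active: foo_active() runs on the
	 * 0 -> 1 transition and foo_retire() once every tracked fence,
	 * including rq, has retired.
	 */
	return i915_active_add_request(&foo->active, rq);
}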
464 __i915_active_set_fence(struct i915_active *ref, in __i915_active_set_fence() argument
470 if (replace_barrier(ref, active)) { in __i915_active_set_fence()
477 __i915_active_acquire(ref); in __i915_active_set_fence()
483 i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f) in i915_active_set_exclusive() argument
486 return __i915_active_set_fence(ref, &ref->excl, f); in i915_active_set_exclusive()
489 bool i915_active_acquire_if_busy(struct i915_active *ref) in i915_active_acquire_if_busy() argument
491 debug_active_assert(ref); in i915_active_acquire_if_busy()
492 return atomic_add_unless(&ref->count, 1, 0); in i915_active_acquire_if_busy()
495 static void __i915_active_activate(struct i915_active *ref) in __i915_active_activate() argument
497 spin_lock_irq(&ref->tree_lock); /* __active_retire() */ in __i915_active_activate()
498 if (!atomic_fetch_inc(&ref->count)) in __i915_active_activate()
499 debug_active_activate(ref); in __i915_active_activate()
500 spin_unlock_irq(&ref->tree_lock); in __i915_active_activate()
503 int i915_active_acquire(struct i915_active *ref) in i915_active_acquire() argument
507 if (i915_active_acquire_if_busy(ref)) in i915_active_acquire()
510 if (!ref->active) { in i915_active_acquire()
511 __i915_active_activate(ref); in i915_active_acquire()
515 err = mutex_lock_interruptible(&ref->mutex); in i915_active_acquire()
519 if (likely(!i915_active_acquire_if_busy(ref))) { in i915_active_acquire()
520 err = ref->active(ref); in i915_active_acquire()
522 __i915_active_activate(ref); in i915_active_acquire()
525 mutex_unlock(&ref->mutex); in i915_active_acquire()
530 int i915_active_acquire_for_context(struct i915_active *ref, u64 idx) in i915_active_acquire_for_context() argument
535 err = i915_active_acquire(ref); in i915_active_acquire_for_context()
539 active = active_instance(ref, idx); in i915_active_acquire_for_context()
541 i915_active_release(ref); in i915_active_acquire_for_context()
548 void i915_active_release(struct i915_active *ref) in i915_active_release() argument
550 debug_active_assert(ref); in i915_active_release()
551 active_retire(ref); in i915_active_release()
584 static int flush_lazy_signals(struct i915_active *ref) in flush_lazy_signals() argument
589 enable_signaling(&ref->excl); in flush_lazy_signals()
590 rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { in flush_lazy_signals()
601 int __i915_active_wait(struct i915_active *ref, int state) in __i915_active_wait() argument
606 if (i915_active_acquire_if_busy(ref)) { in __i915_active_wait()
609 err = flush_lazy_signals(ref); in __i915_active_wait()
610 i915_active_release(ref); in __i915_active_wait()
614 if (___wait_var_event(ref, i915_active_is_idle(ref), in __i915_active_wait()
623 flush_work(&ref->work); in __i915_active_wait()
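__i915_active_wait() first enables signaling on all lazily tracked fences (flush_lazy_signals(), line 609), sleeps in the given task state until the count drops to zero, and then flushes the deferred retire worker (line 623) so that retire() has completed before it returns. Callers normally use the i915_active_wait() wrapper from i915_active.h, which passes TASK_INTERRUPTIBLE; a sketch, again against the illustrative struct foo:

static int foo_flush(struct foo *foo)
{
	/* Enable signaling and wait interruptibly until idle. */
	return i915_active_wait(&foo->active);
}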
651 struct i915_active *ref; member
659 if (i915_active_is_idle(wb->ref)) { in barrier_wake()
668 static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence) in __await_barrier() argument
676 GEM_BUG_ON(i915_active_is_idle(ref)); in __await_barrier()
685 wb->ref = ref; in __await_barrier()
687 add_wait_queue(__var_waitqueue(ref), &wb->base); in __await_barrier()
691 static int await_active(struct i915_active *ref, in await_active() argument
698 if (!i915_active_acquire_if_busy(ref)) in await_active()
702 rcu_access_pointer(ref->excl.fence)) { in await_active()
703 err = __await_active(&ref->excl, fn, arg); in await_active()
711 rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { in await_active()
719 err = flush_lazy_signals(ref); in await_active()
723 err = __await_barrier(ref, barrier); in await_active()
729 i915_active_release(ref); in await_active()
739 struct i915_active *ref, in i915_request_await_active() argument
742 return await_active(ref, flags, rq_await_fence, rq, &rq->submit); in i915_request_await_active()
752 struct i915_active *ref, in i915_sw_fence_await_active() argument
755 return await_active(ref, flags, sw_await_fence, fence, fence); in i915_sw_fence_await_active()
758 void i915_active_fini(struct i915_active *ref) in i915_active_fini() argument
760 debug_active_fini(ref); in i915_active_fini()
761 GEM_BUG_ON(atomic_read(&ref->count)); in i915_active_fini()
762 GEM_BUG_ON(work_pending(&ref->work)); in i915_active_fini()
763 mutex_destroy(&ref->mutex); in i915_active_fini()
765 if (ref->cache) in i915_active_fini()
766 kmem_cache_free(slab_cache, ref->cache); in i915_active_fini()
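i915_active_fini() asserts the tracker is already idle (count zero, no pending retire work, lines 761-762) before destroying the mutex and freeing the cached node, so teardown has to drain first. A short sketch under the same struct foo assumption:

static void foo_fini(struct foo *foo)
{
	/* i915_active_fini() requires idle, so wait out any tracking. */
	i915_active_wait(&foo->active);
	i915_active_fini(&foo->active);
}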
774 static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx) in reuse_idle_barrier() argument
778 if (RB_EMPTY_ROOT(&ref->tree)) in reuse_idle_barrier()
781 GEM_BUG_ON(i915_active_is_idle(ref)); in reuse_idle_barrier()
790 if (ref->cache && is_idle_barrier(ref->cache, idx)) { in reuse_idle_barrier()
791 p = &ref->cache->node; in reuse_idle_barrier()
796 p = ref->tree.rb_node; in reuse_idle_barrier()
841 ____active_del_barrier(ref, node, engine)) in reuse_idle_barrier()
848 spin_lock_irq(&ref->tree_lock); in reuse_idle_barrier()
849 rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */ in reuse_idle_barrier()
850 if (p == &ref->cache->node) in reuse_idle_barrier()
851 WRITE_ONCE(ref->cache, NULL); in reuse_idle_barrier()
852 spin_unlock_irq(&ref->tree_lock); in reuse_idle_barrier()
857 int i915_active_acquire_preallocate_barrier(struct i915_active *ref, in i915_active_acquire_preallocate_barrier() argument
864 GEM_BUG_ON(i915_active_is_idle(ref)); in i915_active_acquire_preallocate_barrier()
867 while (!llist_empty(&ref->preallocated_barriers)) in i915_active_acquire_preallocate_barrier()
883 node = reuse_idle_barrier(ref, idx); in i915_active_acquire_preallocate_barrier()
893 node->ref = ref; in i915_active_acquire_preallocate_barrier()
908 __i915_active_acquire(ref); in i915_active_acquire_preallocate_barrier()
920 GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers)); in i915_active_acquire_preallocate_barrier()
921 llist_add_batch(first, last, &ref->preallocated_barriers); in i915_active_acquire_preallocate_barrier()
931 atomic_dec(&ref->count); in i915_active_acquire_preallocate_barrier()
939 void i915_active_acquire_barrier(struct i915_active *ref) in i915_active_acquire_barrier() argument
944 GEM_BUG_ON(i915_active_is_idle(ref)); in i915_active_acquire_barrier()
952 llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) { in i915_active_acquire_barrier()
957 spin_lock_irqsave_nested(&ref->tree_lock, flags, in i915_active_acquire_barrier()
960 p = &ref->tree.rb_node; in i915_active_acquire_barrier()
973 rb_insert_color(&node->node, &ref->tree); in i915_active_acquire_barrier()
974 spin_unlock_irqrestore(&ref->tree_lock, flags); in i915_active_acquire_barrier()
1141 struct kref ref; member
1144 struct i915_active *i915_active_get(struct i915_active *ref) in i915_active_get() argument
1146 struct auto_active *aa = container_of(ref, typeof(*aa), base); in i915_active_get()
1148 kref_get(&aa->ref); in i915_active_get()
1152 static void auto_release(struct kref *ref) in auto_release() argument
1154 struct auto_active *aa = container_of(ref, typeof(*aa), ref); in auto_release()
1160 void i915_active_put(struct i915_active *ref) in i915_active_put() argument
1162 struct auto_active *aa = container_of(ref, typeof(*aa), base); in i915_active_put()
1164 kref_put(&aa->ref, auto_release); in i915_active_put()
1167 static int auto_active(struct i915_active *ref) in auto_active() argument
1169 i915_active_get(ref); in auto_active()
1173 static void auto_retire(struct i915_active *ref) in auto_retire() argument
1175 i915_active_put(ref); in auto_retire()
1186 kref_init(&aa->ref); in i915_active_create()
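Source lines 1141-1186 belong to the self-managed variant: i915_active_create() allocates a wrapper (struct auto_active) holding a kref plus an i915_active whose auto_active()/auto_retire() callbacks simply take and drop that kref, so the allocation stays alive while busy, and i915_active_get()/i915_active_put() manage the external lifetime. A hedged usage sketch; the function name below is illustrative, the i915_active_* calls are the real API:

static int track_request_autonomously(struct i915_request *rq)
{
	struct i915_active *ref;
	int err;

	ref = i915_active_create();	/* kref-managed, starts with one ref */
	if (!ref)
		return -ENOMEM;

	err = i915_active_add_request(ref, rq);

	/*
	 * Drop our reference. On success the kref taken by auto_active()
	 * keeps the allocation alive until rq retires; on failure this
	 * put frees it.
	 */
	i915_active_put(ref);
	return err;
}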