drm/i915: Split i915_active.mutex into an irq-safe spinlock for the rbtree
author    Chris Wilson <chris@chris-wilson.co.uk>
          Thu, 14 Nov 2019 17:25:35 +0000 (17:25 +0000)
committer Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
          Wed, 20 Nov 2019 09:53:00 +0000 (11:53 +0200)
As we want to be able to run inside atomic context for retiring the
i915_active, and we are no longer allowed to abuse mutex_trylock, split
the tree management portion of i915_active.mutex into an irq-safe
spinlock.
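
The heart of the change is __active_retire(): instead of decrementing
ref->count under ref->mutex and using a bool to decide whether to
retire, the final decrement and the irq-safe lock acquisition are
folded into a single atomic_dec_and_lock_irqsave() call, and the rbtree
is detached under the lock but freed outside it. A reduced,
self-contained sketch of that pattern (struct active_sketch and
retire_sketch are illustrative stand-ins, not the driver's types):

    #include <linux/atomic.h>
    #include <linux/rbtree.h>
    #include <linux/spinlock.h>

    struct active_sketch {              /* stand-in for i915_active */
            atomic_t count;
            spinlock_t tree_lock;       /* guards tree */
            struct rb_root tree;
    };

    static void retire_sketch(struct active_sketch *ref)
    {
            struct rb_root root;
            unsigned long flags;

            /*
             * Drop a reference and, only if it hit zero, take tree_lock
             * with interrupts disabled -- usable from irq context,
             * where mutex_trylock() is no longer allowed.
             */
            if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
                    return;

            /* Detach the whole tree while holding the lock ... */
            root = ref->tree;
            ref->tree = RB_ROOT;
            spin_unlock_irqrestore(&ref->tree_lock, flags);

            /* ... then walk and free the detached nodes outside it. */
    }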

References: ffa070fa9b938 ("locking/mutex: Complain upon mutex API misuse in IRQ contexts")
References: https://bugs.freedesktop.org/show_bug.cgi?id=111626
Fixes: fd6853792ac5 ("drm/i915: Push the i915_active.retire into a worker")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191114172535.1116-1-chris@chris-wilson.co.uk
(cherry picked from commit c9ad602feabe4271d2adf1bdae5d8b20c2dc84f1)
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
drivers/gpu/drm/i915/i915_active.c
drivers/gpu/drm/i915/i915_active_types.h

diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 207383dda84db329a2ba43b0edce6691858002fd..5448f37c81024a8210ed25809ca9f1f395033ae4 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -91,14 +91,15 @@ static void debug_active_init(struct i915_active *ref)
 
 static void debug_active_activate(struct i915_active *ref)
 {
-       lockdep_assert_held(&ref->mutex);
+       spin_lock_irq(&ref->tree_lock);
        if (!atomic_read(&ref->count)) /* before the first inc */
                debug_object_activate(ref, &active_debug_desc);
+       spin_unlock_irq(&ref->tree_lock);
 }
 
 static void debug_active_deactivate(struct i915_active *ref)
 {
-       lockdep_assert_held(&ref->mutex);
+       lockdep_assert_held(&ref->tree_lock);
        if (!atomic_read(&ref->count)) /* after the last dec */
                debug_object_deactivate(ref, &active_debug_desc);
 }
@@ -128,29 +129,22 @@ __active_retire(struct i915_active *ref)
 {
        struct active_node *it, *n;
        struct rb_root root;
-       bool retire = false;
+       unsigned long flags;
 
-       lockdep_assert_held(&ref->mutex);
        GEM_BUG_ON(i915_active_is_idle(ref));
 
        /* return the unused nodes to our slabcache -- flushing the allocator */
-       if (atomic_dec_and_test(&ref->count)) {
-               debug_active_deactivate(ref);
-               root = ref->tree;
-               ref->tree = RB_ROOT;
-               ref->cache = NULL;
-               retire = true;
-       }
-
-       mutex_unlock(&ref->mutex);
-       if (!retire)
+       if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
                return;
 
        GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
-       rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
-               GEM_BUG_ON(i915_active_fence_isset(&it->base));
-               kmem_cache_free(global.slab_cache, it);
-       }
+       debug_active_deactivate(ref);
+
+       root = ref->tree;
+       ref->tree = RB_ROOT;
+       ref->cache = NULL;
+
+       spin_unlock_irqrestore(&ref->tree_lock, flags);
 
        /* After the final retire, the entire struct may be freed */
        if (ref->retire)
@@ -158,6 +152,11 @@ __active_retire(struct i915_active *ref)
 
        /* ... except if you wait on it, you must manage your own references! */
        wake_up_var(ref);
+
+       rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
+               GEM_BUG_ON(i915_active_fence_isset(&it->base));
+               kmem_cache_free(global.slab_cache, it);
+       }
 }
 
 static void
@@ -169,7 +168,6 @@ active_work(struct work_struct *wrk)
        if (atomic_add_unless(&ref->count, -1, 1))
                return;
 
-       mutex_lock(&ref->mutex);
        __active_retire(ref);
 }
 
@@ -180,9 +178,7 @@ active_retire(struct i915_active *ref)
        if (atomic_add_unless(&ref->count, -1, 1))
                return;
 
-       /* If we are inside interrupt context (fence signaling), defer */
-       if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS ||
-           !mutex_trylock(&ref->mutex)) {
+       if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
                queue_work(system_unbound_wq, &ref->work);
                return;
        }
@@ -227,7 +223,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
        if (!prealloc)
                return NULL;
 
-       mutex_lock(&ref->mutex);
+       spin_lock_irq(&ref->tree_lock);
        GEM_BUG_ON(i915_active_is_idle(ref));
 
        parent = NULL;
@@ -257,7 +253,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
 
 out:
        ref->cache = node;
-       mutex_unlock(&ref->mutex);
+       spin_unlock_irq(&ref->tree_lock);
 
        BUILD_BUG_ON(offsetof(typeof(*node), base));
        return &node->base;
@@ -278,8 +274,10 @@ void __i915_active_init(struct i915_active *ref,
        if (bits & I915_ACTIVE_MAY_SLEEP)
                ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
 
+       spin_lock_init(&ref->tree_lock);
        ref->tree = RB_ROOT;
        ref->cache = NULL;
+
        init_llist_head(&ref->preallocated_barriers);
        atomic_set(&ref->count, 0);
        __mutex_init(&ref->mutex, "i915_active", key);
@@ -510,7 +508,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
        if (RB_EMPTY_ROOT(&ref->tree))
                return NULL;
 
-       mutex_lock(&ref->mutex);
+       spin_lock_irq(&ref->tree_lock);
        GEM_BUG_ON(i915_active_is_idle(ref));
 
        /*
@@ -575,7 +573,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
                        goto match;
        }
 
-       mutex_unlock(&ref->mutex);
+       spin_unlock_irq(&ref->tree_lock);
 
        return NULL;
 
@@ -583,7 +581,7 @@ match:
        rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
        if (p == &ref->cache->node)
                ref->cache = NULL;
-       mutex_unlock(&ref->mutex);
+       spin_unlock_irq(&ref->tree_lock);
 
        return rb_entry(p, struct active_node, node);
 }
@@ -664,6 +662,7 @@ unwind:
 void i915_active_acquire_barrier(struct i915_active *ref)
 {
        struct llist_node *pos, *next;
+       unsigned long flags;
 
        GEM_BUG_ON(i915_active_is_idle(ref));
 
@@ -673,7 +672,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
         * populated by i915_request_add_active_barriers() to point to the
         * request that will eventually release them.
         */
-       mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
+       spin_lock_irqsave_nested(&ref->tree_lock, flags, SINGLE_DEPTH_NESTING);
        llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
                struct active_node *node = barrier_from_ll(pos);
                struct intel_engine_cs *engine = barrier_to_engine(node);
@@ -699,7 +698,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
                llist_add(barrier_to_ll(node), &engine->barrier_tasks);
                intel_engine_pm_put(engine);
        }
-       mutex_unlock(&ref->mutex);
+       spin_unlock_irqrestore(&ref->tree_lock, flags);
 }
 
 void i915_request_add_active_barriers(struct i915_request *rq)
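
Note the two lock flavours in the hunks above: paths that run only in
process context with interrupts enabled (active_instance(),
reuse_idle_barrier()) take the plain spin_lock_irq()/spin_unlock_irq()
pair, while paths that may be entered with the interrupt state unknown
(__active_retire() via fence signalling, i915_active_acquire_barrier())
must use the irqsave variants. A minimal sketch of that convention;
demo_lock is a hypothetical stand-in for tree_lock:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    /* Process context only: irqs are known to be enabled, so the
     * cheaper unconditional disable/enable pair suffices. */
    static void process_context_path(void)
    {
            spin_lock_irq(&demo_lock);
            /* ... look up or insert a node ... */
            spin_unlock_irq(&demo_lock);
    }

    /* Callable from any context: the current irq state is unknown,
     * so it must be saved across the critical section and restored. */
    static void any_context_path(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&demo_lock, flags);
            /* ... retire and detach nodes ... */
            spin_unlock_irqrestore(&demo_lock, flags);
    }
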
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
index d89a74c142c661fa7bbaec0b2c1e87c076172688..96aed0ee700af753ee1b21d6002445a2a3b6a7f5 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -48,6 +48,7 @@ struct i915_active {
        atomic_t count;
        struct mutex mutex;
 
+       spinlock_t tree_lock;
        struct active_node *cache;
        struct rb_root tree;
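
After the patch, tree_lock sits alongside the retained mutex: only the
members reached through cache and tree need irq-safe serialisation, so
the mutex can stay for the sleeping paths. An annotated sketch of the
relevant members (surrounding fields elided; the comments are
editorial, not from the source):

    struct i915_active {
            atomic_t count;            /* active reference count */
            struct mutex mutex;        /* kept for the sleeping waiters */

            spinlock_t tree_lock;      /* irq-safe; protects cache and tree */
            struct active_node *cache; /* last node looked up or added */
            struct rb_root tree;       /* nodes keyed by timeline */
            /* ... */
    };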