drm/i915: Reduce locking around i915_active_acquire_preallocate_barrier()
author	Chris Wilson <chris@chris-wilson.co.uk>
Fri, 31 Jul 2020 08:50:14 +0000 (09:50 +0100)
committer	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Mon, 7 Sep 2020 10:19:11 +0000 (13:19 +0300)
As the conversion between idle-barrier and full i915_active_fence is
already serialised by explicit memory barriers, the spinlock that
i915_active_acquire_preallocate_barrier() holds while searching for an
idle-barrier to reuse can be reduced to an RCU read lock that ensures the
fence remains valid, taking the spinlock only for the update of the
rbtree itself.
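
The resulting pattern is: descend the rbtree under rcu_read_lock(), using
READ_ONCE() on the child pointers so a concurrent rotation is tolerated
(following a stale pointer only costs a missed reuse), and take the
spinlock once a node has been chosen and must be erased. Below is a
minimal sketch of that pattern, not the actual i915_active code; the
demo_node/demo_reuse names are placeholders for illustration only.

	/*
	 * Hypothetical sketch of the locking pattern in this patch:
	 * lockless lookup under RCU, spinlock only for the tree update.
	 */
	#include <linux/rbtree.h>
	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct demo_node {
		struct rb_node rb;
		u64 key;
	};

	static struct demo_node *demo_reuse(struct rb_root *tree,
					    spinlock_t *lock, u64 key)
	{
		struct demo_node *found = NULL;
		struct rb_node *p;

		rcu_read_lock();
		p = READ_ONCE(tree->rb_node);
		while (p) {
			struct demo_node *node =
				rb_entry(p, struct demo_node, rb);

			if (node->key == key) {
				found = node;
				break;
			}

			/* READ_ONCE() tolerates concurrent rotations. */
			if (node->key < key)
				p = READ_ONCE(p->rb_right);
			else
				p = READ_ONCE(p->rb_left);
		}

		if (found) {
			/* Only the rbtree update needs the spinlock. */
			spin_lock_irq(lock);
			rb_erase(&found->rb, tree);
			spin_unlock_irq(lock);
		}
		rcu_read_unlock();

		return found;
	}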

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Thomas Hellström <thomas.hellstrom@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200731085015.32368-6-chris@chris-wilson.co.uk
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
drivers/gpu/drm/i915/i915_active.c

index 89c34a69a2eaa12646ce12efaca885c1c0c26b63..b0a6522be3d1401fd6a6d7fcd422a6f32625dc6b 100644 (file)
@@ -807,7 +807,6 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
        if (RB_EMPTY_ROOT(&ref->tree))
                return NULL;
 
-       spin_lock_irq(&ref->tree_lock);
        GEM_BUG_ON(i915_active_is_idle(ref));
 
        /*
@@ -833,9 +832,9 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
 
                prev = p;
                if (node->timeline < idx)
-                       p = p->rb_right;
+                       p = READ_ONCE(p->rb_right);
                else
-                       p = p->rb_left;
+                       p = READ_ONCE(p->rb_left);
        }
 
        /*
@@ -872,11 +871,10 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
                        goto match;
        }
 
-       spin_unlock_irq(&ref->tree_lock);
-
        return NULL;
 
 match:
+       spin_lock_irq(&ref->tree_lock);
        rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
        if (p == &ref->cache->node)
                WRITE_ONCE(ref->cache, NULL);
@@ -910,7 +908,9 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
                struct llist_node *prev = first;
                struct active_node *node;
 
+               rcu_read_lock();
                node = reuse_idle_barrier(ref, idx);
+               rcu_read_unlock();
                if (!node) {
                        node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
                        if (!node)