drm/i915: Keep the most recently used active-fence upon discard
author	Chris Wilson <chris@chris-wilson.co.uk>
Fri, 31 Jul 2020 08:50:12 +0000 (09:50 +0100)
committer	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Mon, 7 Sep 2020 10:18:36 +0000 (13:18 +0300)
Whenever an i915_active idles, we prune its tree of old fence slots to
prevent a gradual leak should it be used to track many, many timelines.
The downside is that we then have to frequently reallocate the rbtree.
A compromise is that we keep the most recently used fence slot, and
reuse that for the next active reference as that is the most likely
timeline to be reused.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Thomas Hellström <thomas.hellstrom@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200731085015.32368-4-chris@chris-wilson.co.uk
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
drivers/gpu/drm/i915/i915_active.c
drivers/gpu/drm/i915/i915_active.h

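Before diving into the diff below, here is the shape of the idea in isolation. This is an illustrative sketch rather than the driver code: struct demo_node, struct demo_tree and demo_retire() are invented names for this example, and only the rbtree calls (rb_erase(), rb_link_node(), rb_insert_color(), rbtree_postorder_for_each_entry_safe()) mirror what the patch does with ref->tree and ref->cache.

#include <linux/rbtree.h>
#include <linux/slab.h>

/* Invented stand-ins for i915's active_node/i915_active, illustration only */
struct demo_node {
	struct rb_node node;
	u64 timeline;
};

struct demo_tree {
	struct rb_root tree;
	struct demo_node *cache;	/* most recently used node */
};

static void demo_retire(struct demo_tree *dt)
{
	struct rb_root doomed = RB_ROOT;
	struct demo_node *it, *n;

	if (dt->cache) {
		/* Detach the MRU node, then steal the rest of the tree */
		rb_erase(&dt->cache->node, &dt->tree);
		doomed = dt->tree;

		/* Rebuild the tree with the cached node as its only element */
		dt->tree = RB_ROOT;
		rb_link_node(&dt->cache->node, NULL, &dt->tree.rb_node);
		rb_insert_color(&dt->cache->node, &dt->tree);
	}

	/* Free everything that was discarded */
	rbtree_postorder_for_each_entry_safe(it, n, &doomed, node)
		kfree(it);
}

The next acquisition of the same timeline then finds its node already allocated and linked, avoiding both a fresh allocation and a tree rebuild on the common reuse pattern the commit message describes.
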
index 9890db303fed1941354753acd870c4476299bb36..e63578c80f534a541e7202467a80880257d11d08 100644 (file)
@@ -130,8 +130,8 @@ static inline void debug_active_assert(struct i915_active *ref) { }
 static void
 __active_retire(struct i915_active *ref)
 {
+       struct rb_root root = RB_ROOT;
        struct active_node *it, *n;
-       struct rb_root root;
        unsigned long flags;
 
        GEM_BUG_ON(i915_active_is_idle(ref));
@@ -143,9 +143,21 @@ __active_retire(struct i915_active *ref)
        GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
        debug_active_deactivate(ref);
 
-       root = ref->tree;
-       ref->tree = RB_ROOT;
-       ref->cache = NULL;
+       /* Even if we have not used the cache, we may still have a barrier */
+       if (!ref->cache)
+               ref->cache = fetch_node(ref->tree.rb_node);
+
+       /* Keep the MRU cached node for reuse */
+       if (ref->cache) {
+               /* Discard all other nodes in the tree */
+               rb_erase(&ref->cache->node, &ref->tree);
+               root = ref->tree;
+
+               /* Rebuild the tree with only the cached node */
+               rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node);
+               rb_insert_color(&ref->cache->node, &ref->tree);
+               GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);
+       }
 
        spin_unlock_irqrestore(&ref->tree_lock, flags);
 
@@ -156,6 +168,7 @@ __active_retire(struct i915_active *ref)
        /* ... except if you wait on it, you must manage your own references! */
        wake_up_var(ref);
 
+       /* Finally free the discarded timeline tree  */
        rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
                GEM_BUG_ON(i915_active_fence_isset(&it->base));
                kmem_cache_free(global.slab_cache, it);
@@ -745,16 +758,16 @@ int i915_sw_fence_await_active(struct i915_sw_fence *fence,
        return await_active(ref, flags, sw_await_fence, fence, fence);
 }
 
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
 void i915_active_fini(struct i915_active *ref)
 {
        debug_active_fini(ref);
        GEM_BUG_ON(atomic_read(&ref->count));
        GEM_BUG_ON(work_pending(&ref->work));
-       GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
        mutex_destroy(&ref->mutex);
+
+       if (ref->cache)
+               kmem_cache_free(global.slab_cache, ref->cache);
 }
-#endif
 
 static inline bool is_idle_barrier(struct active_node *node, u64 idx)
 {
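
The i915_active_fini() change above follows from keeping the MRU node: the tree can now legitimately still hold the retained node when the active tracker is torn down, so the RB_EMPTY_ROOT assertion is dropped, the cached node is freed explicitly, and the function must be built even without CONFIG_DRM_I915_DEBUG_GEM (hence the removed #if/#endif here and in the header below). In the invented demo_tree sketch above, the equivalent teardown would look like this (demo_fini() is a hypothetical name, not part of the patch):

/* Hypothetical counterpart to demo_retire(): the one surviving MRU node
 * must be released explicitly at teardown, mirroring the new
 * kmem_cache_free(global.slab_cache, ref->cache) in i915_active_fini(). */
static void demo_fini(struct demo_tree *dt)
{
	kfree(dt->cache);
	dt->cache = NULL;
	dt->tree = RB_ROOT;
}
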
index 73ded3c52a04ba64451213d7a40bbcc9d7317c2d..b9e0394e29752f9c32c6a68331bad8372ac0fe7f 100644 (file)
@@ -217,11 +217,7 @@ i915_active_is_idle(const struct i915_active *ref)
        return !atomic_read(&ref->count);
 }
 
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
 void i915_active_fini(struct i915_active *ref);
-#else
-static inline void i915_active_fini(struct i915_active *ref) { }
-#endif
 
 int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
                                            struct intel_engine_cs *engine);