git.baikalelectronics.ru Git - kernel.git/commitdiff
drm/i915: move i915_active slab to direct module init/exit
authorDaniel Vetter <daniel.vetter@ffwll.ch>
Tue, 27 Jul 2021 12:10:28 +0000 (14:10 +0200)
committerDaniel Vetter <daniel.vetter@ffwll.ch>
Wed, 28 Jul 2021 14:45:57 +0000 (16:45 +0200)
With the global kmem_cache shrink infrastructure gone, there's nothing
special about these caches anymore, and we can convert them over.

I'm splitting this up into one patch per slab cache because there's quite
a bit of noise in renaming the static global.slab_cache to just slab_cache.

v2: Make slab static (Jason, 0day)

Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Cc: Jason Ekstrand <jason@jlekstrand.net>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210727121037.2041102-2-daniel.vetter@ffwll.ch
drivers/gpu/drm/i915/i915_active.c
drivers/gpu/drm/i915/i915_active.h
drivers/gpu/drm/i915/i915_globals.c
drivers/gpu/drm/i915/i915_globals.h
drivers/gpu/drm/i915/i915_pci.c

index 91723123ae9f2e6f151768c465380581bf439fee..3103c1e1fd1484c5d123e38948982338466826ba 100644 (file)
@@ -13,7 +13,6 @@
 
 #include "i915_drv.h"
 #include "i915_active.h"
-#include "i915_globals.h"
 
 /*
  * Active refs memory management
  * they idle (when we know the active requests are inactive) and allocate the
  * nodes from a local slab cache to hopefully reduce the fragmentation.
  */
-static struct i915_global_active {
-       struct i915_global base;
-       struct kmem_cache *slab_cache;
-} global;
+static struct kmem_cache *slab_cache;
 
 struct active_node {
        struct rb_node node;
@@ -174,7 +170,7 @@ __active_retire(struct i915_active *ref)
        /* Finally free the discarded timeline tree  */
        rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
                GEM_BUG_ON(i915_active_fence_isset(&it->base));
-               kmem_cache_free(global.slab_cache, it);
+               kmem_cache_free(slab_cache, it);
        }
 }
 
@@ -322,7 +318,7 @@ active_instance(struct i915_active *ref, u64 idx)
         * XXX: We should preallocate this before i915_active_ref() is ever
         *  called, but we cannot call into fs_reclaim() anyway, so use GFP_ATOMIC.
         */
-       node = kmem_cache_alloc(global.slab_cache, GFP_ATOMIC);
+       node = kmem_cache_alloc(slab_cache, GFP_ATOMIC);
        if (!node)
                goto out;
 
@@ -788,7 +784,7 @@ void i915_active_fini(struct i915_active *ref)
        mutex_destroy(&ref->mutex);
 
        if (ref->cache)
-               kmem_cache_free(global.slab_cache, ref->cache);
+               kmem_cache_free(slab_cache, ref->cache);
 }
 
 static inline bool is_idle_barrier(struct active_node *node, u64 idx)
@@ -908,7 +904,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
                node = reuse_idle_barrier(ref, idx);
                rcu_read_unlock();
                if (!node) {
-                       node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
+                       node = kmem_cache_alloc(slab_cache, GFP_KERNEL);
                        if (!node)
                                goto unwind;
 
@@ -956,7 +952,7 @@ unwind:
                atomic_dec(&ref->count);
                intel_engine_pm_put(barrier_to_engine(node));
 
-               kmem_cache_free(global.slab_cache, node);
+               kmem_cache_free(slab_cache, node);
        }
        return -ENOMEM;
 }
@@ -1176,21 +1172,16 @@ struct i915_active *i915_active_create(void)
 #include "selftests/i915_active.c"
 #endif
 
-static void i915_global_active_exit(void)
+void i915_active_module_exit(void)
 {
-       kmem_cache_destroy(global.slab_cache);
+       kmem_cache_destroy(slab_cache);
 }
 
-static struct i915_global_active global = { {
-       .exit = i915_global_active_exit,
-} };
-
-int __init i915_global_active_init(void)
+int __init i915_active_module_init(void)
 {
-       global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
-       if (!global.slab_cache)
+       slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
+       if (!slab_cache)
                return -ENOMEM;
 
-       i915_global_register(&global.base);
        return 0;
 }
index d0feda68b874f9516d8997c67c2570e3be66495f..5fcdb0e2bc9e163a04557cc4164183fe42e6f16c 100644 (file)
@@ -247,4 +247,7 @@ static inline int __i915_request_await_exclusive(struct i915_request *rq,
        return err;
 }
 
+void i915_active_module_exit(void);
+int i915_active_module_init(void);
+
 #endif /* _I915_ACTIVE_H_ */
index 91198f5b0a06a8a7cf0624949fc313ad2d68b71d..a53135ee831db4bac4b58344be1e74c8ae31f061 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/slab.h>
 #include <linux/workqueue.h>
 
-#include "i915_active.h"
 #include "i915_buddy.h"
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_object.h"
@@ -34,7 +33,6 @@ static void __i915_globals_cleanup(void)
 }
 
 static __initconst int (* const initfn[])(void) = {
-       i915_global_active_init,
        i915_global_buddy_init,
        i915_global_context_init,
        i915_global_gem_context_init,
index 9e6b4fd0752892cc23d736bee75089fb44393742..d80901ba75e3dd11eb759a26f1497f9f899ce897 100644 (file)
@@ -23,7 +23,6 @@ int i915_globals_init(void);
 void i915_globals_exit(void);
 
 /* constructors */
-int i915_global_active_init(void);
 int i915_global_context_init(void);
 int i915_global_gem_context_init(void);
 int i915_global_objects_init(void);
index dbaa6d5006fb60f2e511c4cda9c90379401bb4b8..930dd93de7b039c46410a6104b4fbdc02abba442 100644 (file)
@@ -30,6 +30,7 @@
 
 #include "display/intel_fbdev.h"
 
+#include "i915_active.h"
 #include "i915_drv.h"
 #include "i915_perf.h"
 #include "i915_globals.h"
@@ -1259,6 +1260,7 @@ static const struct {
    void (*exit)(void);
 } init_funcs[] = {
        { i915_check_nomodeset, NULL },
+       { i915_active_module_init, i915_active_module_exit },
        { i915_globals_init, i915_globals_exit },
        { i915_mock_selftests, NULL },
        { i915_pmu_init, i915_pmu_exit },