drm/i915: move scheduler slabs to direct module init/exit
author Daniel Vetter <daniel.vetter@ffwll.ch>
Tue, 27 Jul 2021 12:10:34 +0000 (14:10 +0200)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Wed, 28 Jul 2021 15:18:56 +0000 (17:18 +0200)
With the global kmem_cache shrink infrastructure gone there's nothing special
about these slabs anymore and we can convert them over to direct module
init/exit.

I'm doing this as a separate patch per slab user because renaming the static
global.slab_dependencies|priorities down to plain slab_dependencies|priorities
generates quite a bit of noise.

v2: Make slab static (Jason, 0day)

Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Cc: Jason Ekstrand <jason@jlekstrand.net>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210727121037.2041102-8-daniel.vetter@ffwll.ch
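
The new hooks are driven from the module init/exit table in i915_pci.c (see
the hunk below that adds the { i915_scheduler_module_init,
i915_scheduler_module_exit } entry). As a rough sketch only (the actual
i915_init()/i915_exit() loop is not part of this diff and may differ), such a
table is typically walked forward at module load, with the already-initialized
entries unwound in reverse on failure and again at unload:

static const struct {
	int (*init)(void);
	void (*exit)(void);
} init_funcs[] = {
	/* ... other subsystems ... */
	{ i915_scheduler_module_init, i915_scheduler_module_exit },
};

static int __init i915_init(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(init_funcs); i++) {
		err = init_funcs[i].init();
		if (err) {
			/* Unwind what was already set up, newest first. */
			while (i--)
				if (init_funcs[i].exit)
					init_funcs[i].exit();
			return err;
		}
	}

	return 0;
}

static void __exit i915_exit(void)
{
	int i;

	/* Tear down in reverse order of initialization. */
	for (i = ARRAY_SIZE(init_funcs) - 1; i >= 0; i--)
		if (init_funcs[i].exit)
			init_funcs[i].exit();
}

With the scheduler slabs created in i915_scheduler_module_init() and destroyed
in i915_scheduler_module_exit(), their lifetime is tied to the module itself
rather than to the old i915_globals registration.
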
drivers/gpu/drm/i915/i915_globals.c
drivers/gpu/drm/i915/i915_globals.h
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_scheduler.c
drivers/gpu/drm/i915/i915_scheduler.h

diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
index 8fffa8d93bc5847d71d8857c4898161c3ef78b8b..8923589057ab70f57da7eb9470c89af1f4318210 100644
@@ -8,7 +8,6 @@
 #include <linux/workqueue.h>
 
 #include "i915_globals.h"
-#include "i915_scheduler.h"
 #include "i915_vma.h"
 
 static LIST_HEAD(globals);
@@ -29,7 +28,6 @@ static void __i915_globals_cleanup(void)
 }
 
 static __initconst int (* const initfn[])(void) = {
-       i915_global_scheduler_init,
        i915_global_vma_init,
 };
 
diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h
index 9734740708f42f151b6b8f52ce9db38f993a5421..7a57bce1da052f2a0d37f7772c63274b84af46a6 100644
@@ -23,8 +23,6 @@ int i915_globals_init(void);
 void i915_globals_exit(void);
 
 /* constructors */
-int i915_global_request_init(void);
-int i915_global_scheduler_init(void);
 int i915_global_vma_init(void);
 
 #endif /* _I915_GLOBALS_H_ */
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 028a12cc8eb87551575d93dac03d4ddd06de108e..7069c3b75ab166a75b6454dab2538fbd99705051 100644
@@ -39,6 +39,7 @@
 #include "i915_perf.h"
 #include "i915_globals.h"
 #include "i915_selftest.h"
+#include "i915_scheduler.h"
 
 #define PLATFORM(x) .platform = (x)
 #define GEN(x) \
@@ -1270,6 +1271,7 @@ static const struct {
        { i915_gem_context_module_init, i915_gem_context_module_exit },
        { i915_objects_module_init, i915_objects_module_exit },
        { i915_request_module_init, i915_request_module_exit },
+       { i915_scheduler_module_init, i915_scheduler_module_exit },
        { i915_globals_init, i915_globals_exit },
        { i915_mock_selftests, NULL },
        { i915_pmu_init, i915_pmu_exit },
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 17843c2043568b76a64bcd786540dd344223c91b..762127dd56c5387e807e51bd5835869ac5e8a19e 100644
@@ -7,15 +7,11 @@
 #include <linux/mutex.h>
 
 #include "i915_drv.h"
-#include "i915_globals.h"
 #include "i915_request.h"
 #include "i915_scheduler.h"
 
-static struct i915_global_scheduler {
-       struct i915_global base;
-       struct kmem_cache *slab_dependencies;
-       struct kmem_cache *slab_priorities;
-} global;
+static struct kmem_cache *slab_dependencies;
+static struct kmem_cache *slab_priorities;
 
 static DEFINE_SPINLOCK(schedule_lock);
 
@@ -93,7 +89,7 @@ find_priolist:
        if (prio == I915_PRIORITY_NORMAL) {
                p = &sched_engine->default_priolist;
        } else {
-               p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
+               p = kmem_cache_alloc(slab_priorities, GFP_ATOMIC);
                /* Convert an allocation failure to a priority bump */
                if (unlikely(!p)) {
                        prio = I915_PRIORITY_NORMAL; /* recurses just once */
@@ -122,7 +118,7 @@ find_priolist:
 
 void __i915_priolist_free(struct i915_priolist *p)
 {
-       kmem_cache_free(global.slab_priorities, p);
+       kmem_cache_free(slab_priorities, p);
 }
 
 struct sched_cache {
@@ -320,13 +316,13 @@ void i915_sched_node_reinit(struct i915_sched_node *node)
 static struct i915_dependency *
 i915_dependency_alloc(void)
 {
-       return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
+       return kmem_cache_alloc(slab_dependencies, GFP_KERNEL);
 }
 
 static void
 i915_dependency_free(struct i915_dependency *dep)
 {
-       kmem_cache_free(global.slab_dependencies, dep);
+       kmem_cache_free(slab_dependencies, dep);
 }
 
 bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
@@ -489,32 +485,27 @@ i915_sched_engine_create(unsigned int subclass)
        return sched_engine;
 }
 
-static void i915_global_scheduler_exit(void)
+void i915_scheduler_module_exit(void)
 {
-       kmem_cache_destroy(global.slab_dependencies);
-       kmem_cache_destroy(global.slab_priorities);
+       kmem_cache_destroy(slab_dependencies);
+       kmem_cache_destroy(slab_priorities);
 }
 
-static struct i915_global_scheduler global = { {
-       .exit = i915_global_scheduler_exit,
-} };
-
-int __init i915_global_scheduler_init(void)
+int __init i915_scheduler_module_init(void)
 {
-       global.slab_dependencies = KMEM_CACHE(i915_dependency,
+       slab_dependencies = KMEM_CACHE(i915_dependency,
                                              SLAB_HWCACHE_ALIGN |
                                              SLAB_TYPESAFE_BY_RCU);
-       if (!global.slab_dependencies)
+       if (!slab_dependencies)
                return -ENOMEM;
 
-       global.slab_priorities = KMEM_CACHE(i915_priolist, 0);
-       if (!global.slab_priorities)
+       slab_priorities = KMEM_CACHE(i915_priolist, 0);
+       if (!slab_priorities)
                goto err_priorities;
 
-       i915_global_register(&global.base);
        return 0;
 
 err_priorities:
-       kmem_cache_destroy(global.slab_priorities);
+       kmem_cache_destroy(slab_priorities);
        return -ENOMEM;
 }
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index f4d9811ade5b85bca51817ecc6bf2b61174d4661..0b9b86af6c7f159c2d2b2f261d14f75a1700afb0 100644
@@ -102,4 +102,7 @@ i915_sched_engine_disabled(struct i915_sched_engine *sched_engine)
        return sched_engine->disabled(sched_engine);
 }
 
+void i915_scheduler_module_exit(void);
+int i915_scheduler_module_init(void);
+
 #endif /* _I915_SCHEDULER_H_ */