git.baikalelectronics.ru Git - kernel.git/commitdiff
drm/i915: Extend intel_wakeref to support delayed puts
author: Chris Wilson <chris@chris-wilson.co.uk>
Mon, 23 Mar 2020 10:32:21 +0000 (10:32 +0000)
committer: Chris Wilson <chris@chris-wilson.co.uk>
Mon, 23 Mar 2020 12:51:05 +0000 (12:51 +0000)
In some cases we want to hold onto the wakeref for a little after the
last user so that we can avoid having to drop and then immediately
reacquire it. Allow the last user to specify if they would like to keep
the wakeref alive for a short hysteresis.

v2: Embrace bitfield.h for adjustable flags.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200323103221.14444-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gt/intel_engine_pm.h
drivers/gpu/drm/i915/gt/intel_gt_requests.c
drivers/gpu/drm/i915/intel_wakeref.c
drivers/gpu/drm/i915/intel_wakeref.h

index e52c2b0cb24518a28dd7c22cad8ad49b6e417d59..418df0a1314564bbe081a33f3f973c44150027cb 100644 (file)
@@ -37,6 +37,12 @@ static inline void intel_engine_pm_put_async(struct intel_engine_cs *engine)
        intel_wakeref_put_async(&engine->wakeref);
 }
 
+static inline void intel_engine_pm_put_delay(struct intel_engine_cs *engine,
+                                            unsigned long delay)
+{
+       intel_wakeref_put_delay(&engine->wakeref, delay);
+}
+
 static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
 {
        intel_wakeref_unlock_wait(&engine->wakeref);
index 24c99d0838af6e23e7eb6295139b852d02ec538c..835ec184763e55ae4c43727d0f33e265b29968ac 100644 (file)
@@ -38,7 +38,7 @@ static bool flush_submission(struct intel_gt *gt)
        for_each_engine(engine, gt, id) {
                intel_engine_flush_submission(engine);
                active |= flush_work(&engine->retire_work);
-               active |= flush_work(&engine->wakeref.work);
+               active |= flush_delayed_work(&engine->wakeref.work);
        }
 
        return active;
index 8fbf6f4d3f26b7fe24a941dc13669d093a5f45d9..dfd87d082218078cb98ae368e812dec337f43cb3 100644 (file)
@@ -70,11 +70,12 @@ unlock:
 
 void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
 {
-       INTEL_WAKEREF_BUG_ON(work_pending(&wf->work));
+       INTEL_WAKEREF_BUG_ON(delayed_work_pending(&wf->work));
 
        /* Assume we are not in process context and so cannot sleep. */
        if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) {
-               schedule_work(&wf->work);
+               mod_delayed_work(system_wq, &wf->work,
+                                FIELD_GET(INTEL_WAKEREF_PUT_DELAY, flags));
                return;
        }
 
@@ -83,7 +84,7 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
 
 static void __intel_wakeref_put_work(struct work_struct *wrk)
 {
-       struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work);
+       struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work.work);
 
        if (atomic_add_unless(&wf->count, -1, 1))
                return;
@@ -104,8 +105,9 @@ void __intel_wakeref_init(struct intel_wakeref *wf,
        atomic_set(&wf->count, 0);
        wf->wakeref = 0;
 
-       INIT_WORK(&wf->work, __intel_wakeref_put_work);
-       lockdep_init_map(&wf->work.lockdep_map, "wakeref.work", &key->work, 0);
+       INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work);
+       lockdep_init_map(&wf->work.work.lockdep_map,
+                        "wakeref.work", &key->work, 0);
 }
 
 int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
index 7d1e676b71ef7d4be0dffccebc13e7e074f44360..545c8f277c46013f6b6326d03e23fda372d99d70 100644 (file)
@@ -8,6 +8,7 @@
 #define INTEL_WAKEREF_H
 
 #include <linux/atomic.h>
+#include <linux/bitfield.h>
 #include <linux/bits.h>
 #include <linux/lockdep.h>
 #include <linux/mutex.h>
@@ -41,7 +42,7 @@ struct intel_wakeref {
        struct intel_runtime_pm *rpm;
        const struct intel_wakeref_ops *ops;
 
-       struct work_struct work;
+       struct delayed_work work;
 };
 
 struct intel_wakeref_lockclass {
@@ -117,6 +118,11 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
        return atomic_inc_not_zero(&wf->count);
 }
 
+enum {
+       INTEL_WAKEREF_PUT_ASYNC_BIT = 0,
+       __INTEL_WAKEREF_PUT_LAST_BIT__
+};
+
 /**
  * intel_wakeref_put_flags: Release the wakeref
  * @wf: the wakeref
@@ -134,7 +140,9 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
  */
 static inline void
 __intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
-#define INTEL_WAKEREF_PUT_ASYNC BIT(0)
+#define INTEL_WAKEREF_PUT_ASYNC BIT(INTEL_WAKEREF_PUT_ASYNC_BIT)
+#define INTEL_WAKEREF_PUT_DELAY \
+       GENMASK(BITS_PER_LONG - 1, __INTEL_WAKEREF_PUT_LAST_BIT__)
 {
        INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
        if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
@@ -154,6 +162,14 @@ intel_wakeref_put_async(struct intel_wakeref *wf)
        __intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
 }
 
+static inline void
+intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay)
+{
+       __intel_wakeref_put(wf,
+                           INTEL_WAKEREF_PUT_ASYNC |
+                           FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
+}
+
 /**
  * intel_wakeref_lock: Lock the wakeref (mutex)
  * @wf: the wakeref
@@ -194,7 +210,7 @@ intel_wakeref_unlock_wait(struct intel_wakeref *wf)
 {
        mutex_lock(&wf->mutex);
        mutex_unlock(&wf->mutex);
-       flush_work(&wf->work);
+       flush_delayed_work(&wf->work);
 }
 
 /**