drm/i915/gt: Pass intel_gt to pm routines
author    Chris Wilson <chris@chris-wilson.co.uk>
          Tue, 25 Jun 2019 13:01:10 +0000 (14:01 +0100)
committer Chris Wilson <chris@chris-wilson.co.uk>
          Tue, 25 Jun 2019 19:17:22 +0000 (20:17 +0100)
Switch from passing the i915 container to the newly named struct intel_gt.
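
For illustration, a minimal sketch of the new calling convention (the wrapper
function below is hypothetical; only the intel_gt_pm_get()/intel_gt_pm_put()
signatures and the &i915->gt argument come from this patch):

    #include "i915_drv.h"
    #include "gt/intel_gt_pm.h"

    /* Hypothetical caller, shown only to illustrate the argument change. */
    static void example_hold_gt(struct drm_i915_private *i915)
    {
            /* Previously: intel_gt_pm_get(i915); */
            intel_gt_pm_get(&i915->gt);     /* wakeref is now tracked on the GT */

            /* ... submit work to the GPU ... */

            /* Previously: intel_gt_pm_put(i915); */
            intel_gt_pm_put(&i915->gt);
    }

Engine code can instead use its own backpointer, e.g. intel_gt_pm_get(engine->gt),
avoiding the detour through engine->i915.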

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190625130128.11009-2-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gem/i915_gem_pm.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
drivers/gpu/drm/i915/gt/intel_engine_pm.c
drivers/gpu/drm/i915/gt/intel_gt_pm.c
drivers/gpu/drm/i915/gt/intel_gt_pm.h
drivers/gpu/drm/i915/gt/intel_reset.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/selftests/i915_gem.c

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index cf8edb6822eed476911e118850cae91524b40d78..1c5dfbfad71b6514434fc04ae749090156ce1b78 100644
@@ -2437,7 +2437,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
         * wakeref that we hold until the GPU has been idle for at least
         * 100ms.
         */
-       intel_gt_pm_get(eb.i915);
+       intel_gt_pm_get(&eb.i915->gt);
 
        err = i915_mutex_lock_interruptible(dev);
        if (err)
@@ -2607,7 +2607,7 @@ err_engine:
 err_unlock:
        mutex_unlock(&dev->struct_mutex);
 err_rpm:
-       intel_gt_pm_put(eb.i915);
+       intel_gt_pm_put(&eb.i915->gt);
        i915_gem_context_put(eb.gem_context);
 err_destroy:
        eb_destroy(&eb);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 8f721cf0ab9974165f717291d2c919edc0c2755a..ee1f66594a35154ab71f964e89a3133b4953d2f9 100644
@@ -258,7 +258,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
         * guarantee that the context image is complete. So let's just reset
         * it and start again.
         */
-       intel_gt_resume(i915);
+       intel_gt_resume(&i915->gt);
 
        if (i915_gem_init_hw(i915))
                goto err_wedged;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 24a3c677ccd51f5c4a150e576e5a6edb7cc0a336..a1f0b235f56b49669ccb53b2fe50a920a468a183 100644
@@ -379,7 +379,7 @@ static void disable_retire_worker(struct drm_i915_private *i915)
 {
        i915_gem_shrinker_unregister(i915);
 
-       intel_gt_pm_get(i915);
+       intel_gt_pm_get(&i915->gt);
 
        cancel_delayed_work_sync(&i915->gem.retire_work);
        flush_work(&i915->gem.idle_work);
@@ -387,7 +387,7 @@ static void disable_retire_worker(struct drm_i915_private *i915)
 
 static void restore_retire_worker(struct drm_i915_private *i915)
 {
-       intel_gt_pm_put(i915);
+       intel_gt_pm_put(&i915->gt);
 
        mutex_lock(&i915->drm.struct_mutex);
        igt_flush_test(i915, I915_WAIT_LOCKED);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 2ce00d3dc42a1a5fa17b2175b78cd3657f88abe1..5253c382034d87cf470d74ed9ecd1802ccd70f93 100644
@@ -18,7 +18,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
 
        GEM_TRACE("%s\n", engine->name);
 
-       intel_gt_pm_get(engine->i915);
+       intel_gt_pm_get(engine->gt);
 
        /* Pin the default state for fast resets from atomic context. */
        map = NULL;
@@ -129,7 +129,7 @@ static int __engine_park(struct intel_wakeref *wf)
 
        engine->execlists.no_priolist = false;
 
-       intel_gt_pm_put(engine->i915);
+       intel_gt_pm_put(engine->gt);
        return 0;
 }
 
@@ -149,7 +149,7 @@ int intel_engines_resume(struct drm_i915_private *i915)
        enum intel_engine_id id;
        int err = 0;
 
-       intel_gt_pm_get(i915);
+       intel_gt_pm_get(&i915->gt);
        for_each_engine(engine, i915, id) {
                intel_engine_pm_get(engine);
                engine->serial++; /* kernel context lost */
@@ -162,7 +162,7 @@ int intel_engines_resume(struct drm_i915_private *i915)
                        break;
                }
        }
-       intel_gt_pm_put(i915);
+       intel_gt_pm_put(&i915->gt);
 
        return err;
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 6062840b5b46e52d2fb5e8d9362cd66e8b2ab633..ec6b69d014b65ce581a67e8de9dba6ff5f00d766 100644
@@ -50,9 +50,11 @@ static int intel_gt_unpark(struct intel_wakeref *wf)
        return 0;
 }
 
-void intel_gt_pm_get(struct drm_i915_private *i915)
+void intel_gt_pm_get(struct intel_gt *gt)
 {
-       intel_wakeref_get(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_unpark);
+       struct intel_runtime_pm *rpm = &gt->i915->runtime_pm;
+
+       intel_wakeref_get(rpm, &gt->wakeref, intel_gt_unpark);
 }
 
 static int intel_gt_park(struct intel_wakeref *wf)
@@ -75,9 +77,11 @@ static int intel_gt_park(struct intel_wakeref *wf)
        return 0;
 }
 
-void intel_gt_pm_put(struct drm_i915_private *i915)
+void intel_gt_pm_put(struct intel_gt *gt)
 {
-       intel_wakeref_put(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_park);
+       struct intel_runtime_pm *rpm = &gt->i915->runtime_pm;
+
+       intel_wakeref_put(rpm, &gt->wakeref, intel_gt_park);
 }
 
 void intel_gt_pm_init_early(struct intel_gt *gt)
@@ -96,7 +100,7 @@ static bool reset_engines(struct drm_i915_private *i915)
 
 /**
  * intel_gt_sanitize: called after the GPU has lost power
- * @i915: the i915 device
+ * @gt: the i915 GT container
  * @force: ignore a failed reset and sanitize engine state anyway
  *
  * Anytime we reset the GPU, either with an explicit GPU reset or through a
@@ -104,21 +108,21 @@ static bool reset_engines(struct drm_i915_private *i915)
  * to match. Note that calling intel_gt_sanitize() if the GPU has not
  * been reset results in much confusion!
  */
-void intel_gt_sanitize(struct drm_i915_private *i915, bool force)
+void intel_gt_sanitize(struct intel_gt *gt, bool force)
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
 
        GEM_TRACE("\n");
 
-       if (!reset_engines(i915) && !force)
+       if (!reset_engines(gt->i915) && !force)
                return;
 
-       for_each_engine(engine, i915, id)
+       for_each_engine(engine, gt->i915, id)
                intel_engine_reset(engine, false);
 }
 
-void intel_gt_resume(struct drm_i915_private *i915)
+void intel_gt_resume(struct intel_gt *gt)
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
@@ -129,7 +133,7 @@ void intel_gt_resume(struct drm_i915_private *i915)
         * Only the kernel contexts should remain pinned over suspend,
         * allowing us to fixup the user contexts on their first pin.
         */
-       for_each_engine(engine, i915, id) {
+       for_each_engine(engine, gt->i915, id) {
                struct intel_context *ce;
 
                ce = engine->kernel_context;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index b6049a9078908f9ebd01548b9f89dc7f6a089b0c..4dbb92cf58d794fea18fa7253c1e370a9945fb54 100644
@@ -9,7 +9,6 @@
 
 #include <linux/types.h>
 
-struct drm_i915_private;
 struct intel_gt;
 
 enum {
@@ -17,12 +16,12 @@ enum {
        INTEL_GT_PARK,
 };
 
-void intel_gt_pm_get(struct drm_i915_private *i915);
-void intel_gt_pm_put(struct drm_i915_private *i915);
+void intel_gt_pm_get(struct intel_gt *gt);
+void intel_gt_pm_put(struct intel_gt *gt);
 
 void intel_gt_pm_init_early(struct intel_gt *gt);
 
-void intel_gt_sanitize(struct drm_i915_private *i915, bool force);
-void intel_gt_resume(struct drm_i915_private *i915);
+void intel_gt_sanitize(struct intel_gt *gt, bool force);
+void intel_gt_resume(struct intel_gt *gt);
 
 #endif /* INTEL_GT_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 3c925af647937883d689e6f90080c1b977cf9d75..e92054e118cc4627354c703dbefca25c8c0eee78 100644
@@ -714,7 +714,7 @@ static void reset_prepare(struct drm_i915_private *i915)
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
 
-       intel_gt_pm_get(i915);
+       intel_gt_pm_get(&i915->gt);
        for_each_engine(engine, i915, id)
                reset_prepare_engine(engine);
 
@@ -765,7 +765,7 @@ static void reset_finish(struct drm_i915_private *i915)
                reset_finish_engine(engine);
                intel_engine_signal_breadcrumbs(engine);
        }
-       intel_gt_pm_put(i915);
+       intel_gt_pm_put(&i915->gt);
 }
 
 static void nop_submit_request(struct i915_request *request)
@@ -891,7 +891,7 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
        }
        mutex_unlock(&i915->gt.timelines.mutex);
 
-       intel_gt_sanitize(i915, false);
+       intel_gt_sanitize(&i915->gt, false);
 
        /*
         * Undo nop_submit_request. We prevent all new i915 requests from
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6241a4615501f3a7a9c053ff587d52c12cc05ec1..f5b7c37c165f4b002eff3625ab2daae164bb4098 100644
@@ -2377,7 +2377,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
        intel_power_domains_resume(dev_priv);
 
-       intel_gt_sanitize(dev_priv, true);
+       intel_gt_sanitize(&dev_priv->gt, true);
 
        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e59be5c05e1b72fcdcec280ed37d80625cc75414..deecbe128e5b1e9c6eca2f150d1e7e1ce429974b 100644
@@ -1157,7 +1157,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
         * it may impact the display and we are uncertain about the stability
         * of the reset, so this could be applied to even earlier gen.
         */
-       intel_gt_sanitize(i915, false);
+       intel_gt_sanitize(&i915->gt, false);
 
        intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index c6a01a6e87f15b4b2b85945a40a037feb50fecc6..ed0c17bf6613c1c7ea387b9676525efbd9b746c4 100644
@@ -115,7 +115,7 @@ static void pm_resume(struct drm_i915_private *i915)
         * that runtime-pm just works.
         */
        with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-               intel_gt_sanitize(i915, false);
+               intel_gt_sanitize(&i915->gt, false);
                i915_gem_sanitize(i915);
                i915_gem_resume(i915);
        }