drm/i915: Force reset on unready engine
author     Mika Kuoppala <mika.kuoppala@linux.intel.com>
           Mon, 13 Aug 2018 13:01:16 +0000 (16:01 +0300)
committer  Mika Kuoppala <mika.kuoppala@linux.intel.com>
           Mon, 13 Aug 2018 14:00:00 +0000 (17:00 +0300)
If an engine reports that it is not ready for reset, we
give up. However, evidence shows that forcing a per-engine
reset on an engine which does not report itself ready for
reset can bring it back into working order. There is a risk
that we corrupt the context image currently executing on
that engine, but that is a risk worth taking: by unblocking
the engine we prevent the whole device from wedging in the
case of a full gpu reset.

Reset the individual engine even if it reports that it is
not prepared for reset, but only if we aim for a full gpu
reset and not on the first reset attempt.
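
As a rough standalone sketch (not the kernel code itself): the patch
gates the forced reset on the retry count. prepare_engine() and the
toy main() below are illustrative stand-ins for
gen8_engine_reset_prepare() and the i915 reset path; only the
retry >= 1 decision mirrors the diff that follows.

/*
 * Illustrative sketch of the retry-gated decision; not the kernel
 * code, just a self-contained toy. prepare_engine() always "fails"
 * here to show the control flow.
 */
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

static int prepare_engine(int engine)
{
	/* Pretend the engine never acknowledges the reset request. */
	(void)engine;
	return -EIO;
}

static int reset_engines(unsigned int engine_mask, unsigned int retry)
{
	/* Force through unready engines only on later attempts. */
	const bool reset_non_ready = retry >= 1;

	for (int engine = 0; engine < 4; engine++) {
		if (!(engine_mask & (1u << engine)))
			continue;

		if (prepare_engine(engine) && !reset_non_ready)
			return -EIO; /* first attempt: give up as before */

		/* later attempts: proceed, accepting the corruption risk */
	}

	printf("resetting engines 0x%x (retry %u)\n", engine_mask, retry);
	return 0;
}

int main(void)
{
	/* The first attempt refuses, the retry forces the reset through. */
	printf("attempt 0 -> %d\n", reset_engines(0x1, 0));
	printf("attempt 1 -> %d\n", reset_engines(0x1, 1));
	return 0;
}

Running the sketch, attempt 0 bails out with -EIO while attempt 1
proceeds despite the engine never reporting ready.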

v2: force reset only on later attempts, readability (Chris)
v3: simplify with adequate caffeine levels (Chris)
v4: comment about risks and mitigations (Chris)

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180813130116.7250-1-mika.kuoppala@linux.intel.com
drivers/gpu/drm/i915/intel_uncore.c

index 027d14574bfa135a86ed2d5f0802bb67016502f9..20f2f5ad9c3f972966eb4c3199b39dc0b70b3288 100644 (file)
@@ -2085,7 +2085,7 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
        return ret;
 }
 
-static int gen8_reset_engine_start(struct intel_engine_cs *engine)
+static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
        int ret;
@@ -2105,7 +2105,7 @@ static int gen8_reset_engine_start(struct intel_engine_cs *engine)
        return ret;
 }
 
-static void gen8_reset_engine_cancel(struct intel_engine_cs *engine)
+static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
 
@@ -2113,29 +2113,50 @@ static void gen8_reset_engine_cancel(struct intel_engine_cs *engine)
                      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
 }
 
+static int reset_engines(struct drm_i915_private *i915,
+                        unsigned int engine_mask,
+                        unsigned int retry)
+{
+       if (INTEL_GEN(i915) >= 11)
+               return gen11_reset_engines(i915, engine_mask);
+       else
+               return gen6_reset_engines(i915, engine_mask, retry);
+}
+
 static int gen8_reset_engines(struct drm_i915_private *dev_priv,
                              unsigned int engine_mask,
                              unsigned int retry)
 {
        struct intel_engine_cs *engine;
+       const bool reset_non_ready = retry >= 1;
        unsigned int tmp;
        int ret;
 
        for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
-               if (gen8_reset_engine_start(engine)) {
-                       ret = -EIO;
-                       goto not_ready;
-               }
+               ret = gen8_engine_reset_prepare(engine);
+               if (ret && !reset_non_ready)
+                       goto skip_reset;
+
+               /*
+                * If this is not the first failed attempt to prepare,
+                * we choose to proceed anyway.
+                *
+                * By doing so we risk context corruption and, on
+                * some gens (kbl), a possible system hang if the
+                * reset happens during active bb execution.
+                *
+                * We would rather risk context corruption than a
+                * failed reset with a wedged driver/gpu. The
+                * active bb execution case should be covered by
+                * the i915_stop_engines() we call before the reset.
+                */
        }
 
-       if (INTEL_GEN(dev_priv) >= 11)
-               ret = gen11_reset_engines(dev_priv, engine_mask);
-       else
-               ret = gen6_reset_engines(dev_priv, engine_mask, retry);
+       ret = reset_engines(dev_priv, engine_mask, retry);
 
-not_ready:
+skip_reset:
        for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
-               gen8_reset_engine_cancel(engine);
+               gen8_engine_reset_cancel(engine);
 
        return ret;
 }
@@ -2164,12 +2185,15 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
                return NULL;
 }
 
-int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned int engine_mask)
+int intel_gpu_reset(struct drm_i915_private *dev_priv,
+                   const unsigned int engine_mask)
 {
        reset_func reset = intel_get_gpu_reset(dev_priv);
        unsigned int retry;
        int ret;
 
+       GEM_BUG_ON(!engine_mask);
+
        /*
         * We want to perform per-engine reset from atomic context (e.g.
         * softirq), which imposes the constraint that we cannot sleep.