drm/i915/gt: Restore Cherryview back to full-ppgtt
author     Chris Wilson <chris@chris-wilson.co.uk>
           Sun, 10 May 2020 10:24:31 +0000 (11:24 +0100)
committer  Chris Wilson <chris@chris-wilson.co.uk>
           Mon, 11 May 2020 16:26:38 +0000 (17:26 +0100)
This reverts commit 0b718ba1e884f64dce27c19311dd2859b87e56b9.

There are still some residual issues with asynchronous binding and
execution, but since commit 92581f9fb99c ("drm/i915: Immediately execute
the fenced work") we prefer not to use asynchronous binds, and the
remaining issues do not seem restricted to Cherryview [at least the ones
seen over a few dozen CI runs; less frequent issues are sure to be
discovered!]

These issues seem to be mitigated, if not eliminated entirely, by the
previous commit 84eac0c65940 ("drm/i915/gt: Force pte cacheline to main
memory").

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200510102431.21959-3-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/i915_pci.c

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index ed45fc40f8842a15dc00062ad30426784131cae7..15716e4d6b76f542f016aade8804cf22f127fb84 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -3522,6 +3522,54 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
        return 0;
 }
 
+static int emit_pdps(struct i915_request *rq)
+{
+       const struct intel_engine_cs * const engine = rq->engine;
+       struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm);
+       int err, i;
+       u32 *cs;
+
+       GEM_BUG_ON(intel_vgpu_active(rq->i915));
+
+       /*
+        * Beware ye of the dragons, this sequence is magic!
+        *
+        * Small changes to this sequence can cause anything from
+        * GPU hangs to forcewake errors and machine lockups!
+        */
+
+       /* Flush any residual operations from the context load */
+       err = engine->emit_flush(rq, EMIT_FLUSH);
+       if (err)
+               return err;
+
+       /* Magic required to prevent forcewake errors! */
+       err = engine->emit_flush(rq, EMIT_INVALIDATE);
+       if (err)
+               return err;
+
+       cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
+
+       /* Ensure the LRI have landed before we invalidate & continue */
+       *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
+       for (i = GEN8_3LVL_PDPES; i--; ) {
+               const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+               u32 base = engine->mmio_base;
+
+               *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
+               *cs++ = upper_32_bits(pd_daddr);
+               *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
+               *cs++ = lower_32_bits(pd_daddr);
+       }
+       *cs++ = MI_NOOP;
+
+       intel_ring_advance(rq, cs);
+
+       return 0;
+}
+
 static int execlists_request_alloc(struct i915_request *request)
 {
        int ret;
@@ -3543,6 +3591,12 @@ static int execlists_request_alloc(struct i915_request *request)
         * to cancel/unwind this request now.
         */
 
+       if (!i915_vm_is_4lvl(request->context->vm)) {
+               ret = emit_pdps(request);
+               if (ret)
+                       return ret;
+       }
+
        /* Unconditionally invalidate GPU caches and TLBs. */
        ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
        if (ret)
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 2fc25ec12c3d9290600c055f22d6c8bf7da268b6..193048ce3c3ac6339a9927a3ba437d2ddbee2abf 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -615,7 +615,7 @@ static const struct intel_device_info chv_info = {
        .has_logical_ring_contexts = 1,
        .display.has_gmch = 1,
        .dma_mask_size = 39,
-       .ppgtt_type = INTEL_PPGTT_ALIASING,
+       .ppgtt_type = INTEL_PPGTT_FULL,
        .ppgtt_size = 32,
        .has_reset_engine = 1,
        .has_snoop = true,
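
As an aside (not part of the patch): the intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2) call in the emit_pdps() hunk above reserves one MI_LOAD_REGISTER_IMM header, four dwords per PDP entry (UDW offset, UDW value, LDW offset, LDW value) and one trailing MI_NOOP. Below is a minimal standalone sketch of that dword accounting, assuming GEN8_3LVL_PDPES is 4 (a three-level ppgtt exposes four PDP slots); the macro value and the standalone main() are illustrative assumptions, not taken from this patch.

/* Hedged sketch: dword budget behind emit_pdps()'s intel_ring_begin() call. */
#include <stdio.h>

#define GEN8_3LVL_PDPES 4	/* assumed value: four page-directory pointers */

int main(void)
{
	unsigned int lri_header  = 1;			/* MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) */
	unsigned int lri_payload = 4 * GEN8_3LVL_PDPES;	/* offset+value for UDW and LDW of each PDP */
	unsigned int noop        = 1;			/* trailing MI_NOOP pads to an even dword count */

	/* 1 + 16 + 1 = 18 dwords, i.e. 4 * GEN8_3LVL_PDPES + 2 */
	printf("dwords reserved: %u\n", lri_header + lri_payload + noop);
	return 0;
}

The even total matters because ring emission is expected to stay qword-aligned, which is presumably why the single MI_NOOP is appended after the register-write loop.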