drm/i915: L3 cache remapping is part of context switching
author	Chris Wilson <chris@chris-wilson.co.uk>
	Thu, 28 Apr 2016 08:56:42 +0000 (09:56 +0100)
committer	Chris Wilson <chris@chris-wilson.co.uk>
	Thu, 28 Apr 2016 11:17:32 +0000 (12:17 +0100)
Move the i915_gem_l3_remap function so that it sits next to the context
switching code, which is where we perform the L3 remap.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1461833819-3991-8-git-send-email-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
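
For context before the diff: a minimal sketch of how a context-switch path could drive i915_gem_l3_remap() once per slice that still needs its parity information applied. The helper name remap_pending_l3_slices and its remap_slice bitmask parameter are illustrative assumptions, not part of this patch; only i915_gem_l3_remap() itself appears in the diff below.

	/*
	 * Hypothetical caller sketch, not part of this patch: walk the slices
	 * whose L3 remapping information needs to be (re)applied and emit the
	 * remap for each one via i915_gem_l3_remap().  The helper name and the
	 * remap_slice bitmask are illustrative assumptions; the real call site
	 * is the RCS context-switch path in i915_gem_context.c.
	 */
	static int remap_pending_l3_slices(struct drm_i915_gem_request *req,
					   u32 remap_slice)
	{
		struct intel_engine_cs *engine = req->engine;
		int slice, ret;

		for (slice = 0; slice < NUM_L3_SLICES(engine->dev); slice++) {
			if (!(remap_slice & (1 << slice)))
				continue;

			ret = i915_gem_l3_remap(req, slice);
			if (ret)
				return ret;
		}

		return 0;
	}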

index 457fb8f548ed8814a8409849cf27998e000c868f..e83ee118843ae9719ce37db79755f7a134a2a053 100644 (file)
@@ -4738,37 +4738,6 @@ err:
        return ret;
 }
 
-int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
-{
-       struct intel_engine_cs *engine = req->engine;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
-       int i, ret;
-
-       if (!HAS_L3_DPF(dev) || !remap_info)
-               return 0;
-
-       ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
-       if (ret)
-               return ret;
-
-       /*
-        * Note: We do not worry about the concurrent register cacheline hang
-        * here because no other code should access these registers other than
-        * at initialization time.
-        */
-       for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
-               intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-               intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
-               intel_ring_emit(engine, remap_info[i]);
-       }
-
-       intel_ring_advance(engine);
-
-       return ret;
-}
-
 void i915_gem_init_swizzling(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
index fb269e4dff3fd1c2cb7b4ab103717d4f88fce7ba..ec0122bc42632aca8a105d87472eb97ed2f58df6 100644 (file)
@@ -601,6 +601,37 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
        return ret;
 }
 
+int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
+{
+       struct intel_engine_cs *engine = req->engine;
+       struct drm_device *dev = engine->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
+       int i, ret;
+
+       if (!HAS_L3_DPF(dev) || !remap_info)
+               return 0;
+
+       ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
+       if (ret)
+               return ret;
+
+       /*
+        * Note: We do not worry about the concurrent register cacheline hang
+        * here because no other code should access these registers other than
+        * at initialization time.
+        */
+       for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
+               intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+               intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
+               intel_ring_emit(engine, remap_info[i]);
+       }
+
+       intel_ring_advance(engine);
+
+       return ret;
+}
+
 static inline bool skip_rcs_switch(struct intel_engine_cs *engine,
                                   struct intel_context *to)
 {