drm/i915: Compartmentalize i915_ggtt_init_hw
author    Tvrtko Ursulin <tvrtko.ursulin@intel.com>
          Fri, 21 Jun 2019 07:07:57 +0000 (08:07 +0100)
committer Tvrtko Ursulin <tvrtko.ursulin@intel.com>
          Fri, 21 Jun 2019 12:48:34 +0000 (13:48 +0100)
Having made a start on better code compartmentalization by introducing
struct intel_gt, continue the theme elsewhere in the code by making
functions take the parameters which logically make the most sense for
them, instead of the global struct drm_i915_private.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-19-tvrtko.ursulin@linux.intel.com
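
The pattern at work is sketched below in a minimal, self-contained form.
The names (struct device_priv, struct ggtt, device_ggtt_init_hw) are
hypothetical stand-ins for illustration, not the driver's types: helpers
take the narrowest object they operate on and reach back to the device
through a back-pointer (as the real ggtt_init_hw does via ggtt->vm.i915)
only when they must.

    /*
     * Minimal sketch of the compartmentalization pattern; stand-in
     * names, not driver code.
     */
    #include <stdio.h>

    struct device_priv;                   /* stands in for drm_i915_private */

    struct ggtt {                         /* stands in for i915_ggtt */
            struct device_priv *i915;     /* back-pointer, like ggtt->vm.i915 */
            unsigned long mappable_end;
    };

    struct device_priv {
            struct ggtt ggtt;
    };

    /* Narrow interface: needs only the GGTT, not the whole device. */
    static int ggtt_init_hw(struct ggtt *ggtt)
    {
            printf("init GGTT, mappable_end=%lu\n", ggtt->mappable_end);
            return 0;
    }

    /* The device-level entry point remains, but becomes a thin wrapper. */
    static int device_ggtt_init_hw(struct device_priv *dev_priv)
    {
            return ggtt_init_hw(&dev_priv->ggtt);
    }

    int main(void)
    {
            struct device_priv dev = { .ggtt = { .mappable_end = 256 } };

            dev.ggtt.i915 = &dev;         /* wire up the back-pointer */
            return device_ggtt_init_hw(&dev);
    }

The wrapper keeps the external entry point stable while the body migrates
to the narrower type, which is exactly the shape of the
i915_ggtt_init_hw/ggtt_init_hw split in the diff below.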
drivers/gpu/drm/i915/i915_gem_gtt.c

index 629683c52e508bccfdb24a33186e0f0fa871963e..6f86c8e051fedf938c50ef1f29a8040dcb71983e 100644
@@ -3516,45 +3516,65 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
        return 0;
 }
 
-/**
- * i915_ggtt_init_hw - Initialize GGTT hardware
- * @dev_priv: i915 device
- */
-int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
+static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
 {
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       int ret;
+       ggtt->vm.cleanup(&ggtt->vm);
+}
 
-       stash_init(&dev_priv->mm.wc_stash);
+static int ggtt_init_hw(struct i915_ggtt *ggtt)
+{
+       struct drm_i915_private *i915 = ggtt->vm.i915;
+       int ret = 0;
+
+       mutex_lock(&i915->drm.struct_mutex);
 
-       /* Note that we use page colouring to enforce a guard page at the
-        * end of the address space. This is required as the CS may prefetch
-        * beyond the end of the batch buffer, across the page boundary,
-        * and beyond the end of the GTT if we do not provide a guard.
-        */
-       mutex_lock(&dev_priv->drm.struct_mutex);
        i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
 
        ggtt->vm.is_ggtt = true;
 
        /* Only VLV supports read-only GGTT mappings */
-       ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
+       ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
 
-       if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv))
+       if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
                ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
-       mutex_unlock(&dev_priv->drm.struct_mutex);
 
-       if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
-                               dev_priv->ggtt.gmadr.start,
-                               dev_priv->ggtt.mappable_end)) {
+       if (!io_mapping_init_wc(&ggtt->iomap,
+                               ggtt->gmadr.start,
+                               ggtt->mappable_end)) {
+               ggtt_cleanup_hw(ggtt);
                ret = -EIO;
-               goto out_gtt_cleanup;
+               goto out;
        }
 
        ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
 
        i915_ggtt_init_fences(ggtt);
 
+out:
+       mutex_unlock(&i915->drm.struct_mutex);
+
+       return ret;
+}
+
+/**
+ * i915_ggtt_init_hw - Initialize GGTT hardware
+ * @dev_priv: i915 device
+ */
+int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
+{
+       int ret;
+
+       stash_init(&dev_priv->mm.wc_stash);
+
+       /* Note that we use page colouring to enforce a guard page at the
+        * end of the address space. This is required as the CS may prefetch
+        * beyond the end of the batch buffer, across the page boundary,
+        * and beyond the end of the GTT if we do not provide a guard.
+        */
+       ret = ggtt_init_hw(&dev_priv->ggtt);
+       if (ret)
+               return ret;
+
        /*
         * Initialise stolen early so that we may reserve preallocated
         * objects for the BIOS to KMS transition.
@@ -3566,7 +3586,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
        return 0;
 
 out_gtt_cleanup:
-       ggtt->vm.cleanup(&ggtt->vm);
+       ggtt_cleanup_hw(&dev_priv->ggtt);
        return ret;
 }
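
A word on the page-colouring comment that the patch keeps in
i915_ggtt_init_hw: per the comment itself, colouring is what enforces the
guard page that stops command-streamer prefetch from running off the end
of the GTT, and the diff's context shows i915_gtt_color_adjust being
installed as the allocator's colour-adjust hook on platforms with neither
an LLC nor PPGTT. Below is an illustrative, self-contained sketch of that
style of callback; the types and the exact trimming rule are assumptions
for the example, not the driver's drm_mm integration.

    /*
     * Illustrative colour-adjust sketch (stand-in types, not drm_mm):
     * the allocator asks the callback to shrink a candidate [start, end)
     * range whenever a neighbouring node has a different colour, leaving
     * a one-page hole that a prefetching engine may safely overrun into.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GTT_PAGE_SIZE 4096ULL

    struct mm_node {                      /* stands in for drm_mm_node */
            uint64_t start, size;
            unsigned long color;
            bool allocated;
    };

    static bool node_color_differs(const struct mm_node *node,
                                   unsigned long color)
    {
            return node->allocated && node->color != color;
    }

    /* Trim the candidate range away from differently-coloured neighbours. */
    static void gtt_color_adjust(const struct mm_node *prev,
                                 const struct mm_node *next,
                                 unsigned long color,
                                 uint64_t *start, uint64_t *end)
    {
            if (node_color_differs(prev, color))
                    *start += GTT_PAGE_SIZE;
            if (node_color_differs(next, color))
                    *end -= GTT_PAGE_SIZE;
    }

    int main(void)
    {
            struct mm_node prev = { .start = 0, .size = 4096,
                                    .color = 1, .allocated = true };
            struct mm_node next = { .start = 65536, .size = 4096,
                                    .color = 0, .allocated = true };
            uint64_t start = 4096, end = 65536;

            gtt_color_adjust(&prev, &next, 0 /* requested colour */,
                             &start, &end);
            printf("usable range: [%llu, %llu)\n",
                   (unsigned long long)start, (unsigned long long)end);
            return 0;
    }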