]> git.baikalelectronics.ru Git - kernel.git/commitdiff
drm/i915/gem: Amalgamate clflushes on suspend
authorChris Wilson <chris@chris-wilson.co.uk>
Tue, 19 Jan 2021 21:43:31 +0000 (21:43 +0000)
committerChris Wilson <chris@chris-wilson.co.uk>
Wed, 20 Jan 2021 20:46:35 +0000 (20:46 +0000)
When flushing objects larger than the CPU cache it is preferable to use
a single wbinvd() rather than overlapping clflush(). At runtime, we
avoid wbinvd() due to its system-wide latencies, but during
single-threaded suspend, no one will observe the imposed latency and we
can opt for the faster wbinvd to clear all objects in a single hit.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210119214336.1463-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gem/i915_gem_pm.c

index 40d3e40500fa002ce0b4dcdbfb9840314f9550fc..215766cc22bf48ec53398bd4870a4e470302ea8e 100644 (file)
 
 #include "i915_drv.h"
 
+#if defined(CONFIG_X86)
+#include <asm/smp.h>
+#else
+#define wbinvd_on_all_cpus() \
+       pr_warn(DRIVER_NAME ": Missing cache flush in %s\n", __func__)
+#endif
+
 void i915_gem_suspend(struct drm_i915_private *i915)
 {
        GEM_TRACE("%s\n", dev_name(i915->drm.dev));
@@ -32,13 +39,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
        i915_gem_drain_freed_objects(i915);
 }
 
-static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
-{
-       return list_first_entry_or_null(list,
-                                       struct drm_i915_gem_object,
-                                       mm.link);
-}
-
 void i915_gem_suspend_late(struct drm_i915_private *i915)
 {
        struct drm_i915_gem_object *obj;
@@ -48,6 +48,7 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
                NULL
        }, **phase;
        unsigned long flags;
+       bool flush = false;
 
        /*
         * Neither the BIOS, ourselves or any other kernel
@@ -73,29 +74,15 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
 
        spin_lock_irqsave(&i915->mm.obj_lock, flags);
        for (phase = phases; *phase; phase++) {
-               LIST_HEAD(keep);
-
-               while ((obj = first_mm_object(*phase))) {
-                       list_move_tail(&obj->mm.link, &keep);
-
-                       /* Beware the background _i915_gem_free_objects */
-                       if (!kref_get_unless_zero(&obj->base.refcount))
-                               continue;
-
-                       spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
-
-                       i915_gem_object_lock(obj, NULL);
-                       drm_WARN_ON(&i915->drm,
-                           i915_gem_object_set_to_gtt_domain(obj, false));
-                       i915_gem_object_unlock(obj);
-                       i915_gem_object_put(obj);
-
-                       spin_lock_irqsave(&i915->mm.obj_lock, flags);
+               list_for_each_entry(obj, *phase, mm.link) {
+                       if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+                               flush |= (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0;
+                       __start_cpu_write(obj); /* presume auto-hibernate */
                }
-
-               list_splice_tail(&keep, *phase);
        }
        spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+       if (flush)
+               wbinvd_on_all_cpus();
 }
 
 void i915_gem_resume(struct drm_i915_private *i915)