drm/i915: Flush extra hard after writing relocations through the GTT
author     Chris Wilson <chris@chris-wilson.co.uk>
           Tue, 30 Jul 2019 11:21:51 +0000 (12:21 +0100)
committer  Chris Wilson <chris@chris-wilson.co.uk>
           Fri, 2 Aug 2019 07:38:45 +0000 (08:38 +0100)
Recently discovered in commit 297769a005da ("drm/i915: Use maximum write
flush for pwrite_gtt") was that we need to issue our full write barrier
before changing the GGTT PTE, to ensure that our indirect writes through
the GTT land before the PTE is changed (and the writes would otherwise
end up in a different page). The same applies to our GGTT relocation
path.
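
In outline, the ordering this patch enforces looks like the sketch
below. It is a minimal illustration stitched together from names in
this diff; the wrapper function, its arguments and the iowrite32()
value are hypothetical, while intel_gt_flush_ggtt_writes() and
ggtt->vm.clear_range() are the real calls the patch uses:

	/* Hypothetical helper sketching the write/flush/PTE ordering. */
	static void reloc_write_then_move(struct i915_ggtt *ggtt,
					  void __iomem *vaddr,
					  u64 node_start, u64 node_size)
	{
		/* 1. Indirect write of the relocation through the GTT
		 *    aperture (illustrative value). */
		iowrite32(0xdeadbeef, vaddr);

		/*
		 * 2. Flush the write past the GGTT before the PTE changes;
		 *    a bare wmb() does not guarantee the write has landed.
		 */
		intel_gt_flush_ggtt_writes(ggtt->vm.gt);

		/* 3. Only now is it safe to clear (or repoint) the PTE. */
		ggtt->vm.clear_range(&ggtt->vm, node_start, node_size);
	}

The diff below applies this ordering to reloc_cache_reset() and
reloc_iomap(), replacing the two bare wmb() calls.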

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: stable@vger.kernel.org
Reviewed-by: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190730112151.5633-4-chris@chris-wilson.co.uk
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index cbd7c6e3a1f87b53fbe49c0e0bcd39a17debcfbb..4db4463089ce6ea69ec10cbbc2822485cd8ac74b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1014,11 +1014,12 @@ static void reloc_cache_reset(struct reloc_cache *cache)
                kunmap_atomic(vaddr);
                i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
        } else {
-               wmb();
+               struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+
+               intel_gt_flush_ggtt_writes(ggtt->vm.gt);
                io_mapping_unmap_atomic((void __iomem *)vaddr);
-               if (cache->node.allocated) {
-                       struct i915_ggtt *ggtt = cache_to_ggtt(cache);
 
+               if (cache->node.allocated) {
                        ggtt->vm.clear_range(&ggtt->vm,
                                             cache->node.start,
                                             cache->node.size);
@@ -1073,6 +1074,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
        void *vaddr;
 
        if (cache->vaddr) {
+               intel_gt_flush_ggtt_writes(ggtt->vm.gt);
                io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
        } else {
                struct i915_vma *vma;
@@ -1114,7 +1116,6 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 
        offset = cache->node.start;
        if (cache->node.allocated) {
-               wmb();
                ggtt->vm.insert_page(&ggtt->vm,
                                     i915_gem_object_get_dma_address(obj, page),
                                     offset, I915_CACHE_NONE, 0);