drm/i915/selftests: Flush all active callbacks
author    Chris Wilson <chris@chris-wilson.co.uk>
          Fri, 1 Nov 2019 18:10:22 +0000 (18:10 +0000)
committer Chris Wilson <chris@chris-wilson.co.uk>
          Sat, 2 Nov 2019 08:34:53 +0000 (08:34 +0000)
Flushing the outer i915_active is not enough, as we need the barrier to
be applied across all the active dma_fence callbacks. So we must
serialise with each outstanding fence.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=112096
References: f79520bb3337 ("drm/i915/selftests: Synchronize checking active status with retirement")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191101181022.25633-1-chris@chris-wilson.co.uk
 drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c |  4 +---
 drivers/gpu/drm/i915/i915_active.h                  |  1 +
 drivers/gpu/drm/i915/selftests/i915_active.c        | 33 +++++++++++++++++++++
 3 files changed, 35 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index e864406bd2d9ea614768bdbbf34adc05c78a461c..f665a0e23c61f67ff531d39ea47adbfcb0f4d415 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -53,9 +53,7 @@ static struct pulse *pulse_create(void)
 
 static void pulse_unlock_wait(struct pulse *p)
 {
-       mutex_lock(&p->active.mutex);
-       mutex_unlock(&p->active.mutex);
-       flush_work(&p->active.work);
+       i915_active_unlock_wait(&p->active);
 }
 
 static int __live_idle_pulse(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 44859356ce97d2aea65103b34cadad1fdf3faefc..5dd62323b92a7100b0016786933747810c380d69 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -215,5 +215,6 @@ void i915_active_acquire_barrier(struct i915_active *ref);
 void i915_request_add_active_barriers(struct i915_request *rq);
 
 void i915_active_print(struct i915_active *ref, struct drm_printer *m);
+void i915_active_unlock_wait(struct i915_active *ref);
 
 #endif /* _I915_ACTIVE_H_ */
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 260b0ee5d1e378aa9eb4dfd4f14abf1bb76b8ab0..f3fa05c78d7851e3187dec2ea54b2c88794bf612 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -250,3 +250,36 @@ void i915_active_print(struct i915_active *ref, struct drm_printer *m)
                i915_active_release(ref);
        }
 }
+
+static void spin_unlock_wait(spinlock_t *lock)
+{
+       spin_lock_irq(lock);
+       spin_unlock_irq(lock);
+}
+
+void i915_active_unlock_wait(struct i915_active *ref)
+{
+       if (i915_active_acquire_if_busy(ref)) {
+               struct active_node *it, *n;
+
+               rcu_read_lock();
+               rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+                       struct dma_fence *f;
+
+                       /* Wait for all active callbacks */
+                       f = rcu_dereference(it->base.fence);
+                       if (f)
+                               spin_unlock_wait(f->lock);
+               }
+               rcu_read_unlock();
+
+               i915_active_release(ref);
+       }
+
+       /* And wait for the retire callback */
+       mutex_lock(&ref->mutex);
+       mutex_unlock(&ref->mutex);
+
+       /* ... which may have been on a thread instead */
+       flush_work(&ref->work);
+}
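
For reference, the synchronisation idiom at work here: acquiring and then
immediately releasing a lock cannot complete until every critical section
that already held that lock has finished, so an empty lock/unlock pair acts
as a flush for in-flight callbacks (dma_fence callbacks run under f->lock;
the retire path holds ref->mutex or runs from ref->work). What follows is a
minimal userspace sketch of the same idiom, with pthreads standing in for
the kernel primitives; all names are illustrative and none of this is i915
code.

/*
 * Hypothetical userspace sketch of the lock/unlock-wait idiom.
 * A pthread mutex stands in for the fence spinlock.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t cb_lock = PTHREAD_MUTEX_INITIALIZER;
static int cb_done;

/* Stands in for a dma_fence callback running under f->lock. */
static void *callback(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&cb_lock);
	usleep(10000);			/* callback body in progress */
	cb_done = 1;
	pthread_mutex_unlock(&cb_lock);
	return NULL;
}

/* Stands in for spin_unlock_wait(): wait out any current lock holder. */
static void unlock_wait(pthread_mutex_t *lock)
{
	pthread_mutex_lock(lock);	/* blocks until the holder exits */
	pthread_mutex_unlock(lock);	/* protected state is never touched */
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, callback, NULL);
	usleep(1000);	/* crude ordering hint so the callback locks first */

	unlock_wait(&cb_lock);	/* barrier: the callback has now finished */
	printf("callback done: %d\n", cb_done);

	pthread_join(&t, NULL);
	return 0;
}

The patch applies this barrier three times over: once per outstanding fence
via spin_unlock_wait(f->lock), once against the mutex-protected retire path
via the mutex_lock()/mutex_unlock() pair on ref->mutex, and once against the
deferred retirement worker via flush_work(&ref->work).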