git.baikalelectronics.ru Git - kernel.git/commitdiff
drm/i915: Extend i915_request_await_active to use all timelines
authorChris Wilson <chris@chris-wilson.co.uk>
Wed, 11 Mar 2020 09:20:44 +0000 (09:20 +0000)
committerChris Wilson <chris@chris-wilson.co.uk>
Wed, 11 Mar 2020 10:54:59 +0000 (10:54 +0000)
Extend i915_request_await_active() to be able to asynchronously wait on
all the tracked timelines simultaneously.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200311092044.16353-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_active.c
drivers/gpu/drm/i915/i915_active.h
drivers/gpu/drm/i915/i915_vma.c

index 7b3d6c12ad618c8e081daa8bd8c60e580f79bdf4..c4048628188a259f9967392991a190baac34c795 100644 (file)
@@ -518,25 +518,81 @@ int i915_active_wait(struct i915_active *ref)
        return 0;
 }
 
-int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
+static int __await_active(struct i915_active_fence *active,
+                         int (*fn)(void *arg, struct dma_fence *fence),
+                         void *arg)
+{
+       struct dma_fence *fence;
+
+       if (is_barrier(active)) /* XXX flush the barrier? */
+               return 0;
+
+       fence = i915_active_fence_get(active);
+       if (fence) {
+               int err;
+
+               err = fn(arg, fence);
+               dma_fence_put(fence);
+               if (err < 0)
+                       return err;
+       }
+
+       return 0;
+}
+
+static int await_active(struct i915_active *ref,
+                       unsigned int flags,
+                       int (*fn)(void *arg, struct dma_fence *fence),
+                       void *arg)
 {
        int err = 0;
 
+       /* We must always wait for the exclusive fence! */
        if (rcu_access_pointer(ref->excl.fence)) {
-               struct dma_fence *fence;
-
-               rcu_read_lock();
-               fence = dma_fence_get_rcu_safe(&ref->excl.fence);
-               rcu_read_unlock();
-               if (fence) {
-                       err = i915_request_await_dma_fence(rq, fence);
-                       dma_fence_put(fence);
+               err = __await_active(&ref->excl, fn, arg);
+               if (err)
+                       return err;
+       }
+
+       if (flags & I915_ACTIVE_AWAIT_ALL && i915_active_acquire_if_busy(ref)) {
+               struct active_node *it, *n;
+
+               rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+                       err = __await_active(&it->base, fn, arg);
+                       if (err)
+                               break;
                }
+               i915_active_release(ref);
+               if (err)
+                       return err;
        }
 
-       /* In the future we may choose to await on all fences */
+       return 0;
+}
 
-       return err;
+static int rq_await_fence(void *arg, struct dma_fence *fence)
+{
+       return i915_request_await_dma_fence(arg, fence);
+}
+
+int i915_request_await_active(struct i915_request *rq,
+                             struct i915_active *ref,
+                             unsigned int flags)
+{
+       return await_active(ref, flags, rq_await_fence, rq);
+}
+
+static int sw_await_fence(void *arg, struct dma_fence *fence)
+{
+       return i915_sw_fence_await_dma_fence(arg, fence, 0,
+                                            GFP_NOWAIT | __GFP_NOWARN);
+}
+
+int i915_sw_fence_await_active(struct i915_sw_fence *fence,
+                              struct i915_active *ref,
+                              unsigned int flags)
+{
+       return await_active(ref, flags, sw_await_fence, fence);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
index 973ff0447c6c306df235ec44784a3854feea8cf9..b3282ae7913c1db60c0941a000bdef0b756715f1 100644 (file)
@@ -183,7 +183,13 @@ static inline bool i915_active_has_exclusive(struct i915_active *ref)
 
 int i915_active_wait(struct i915_active *ref);
 
-int i915_request_await_active(struct i915_request *rq, struct i915_active *ref);
+int i915_sw_fence_await_active(struct i915_sw_fence *fence,
+                              struct i915_active *ref,
+                              unsigned int flags);
+int i915_request_await_active(struct i915_request *rq,
+                             struct i915_active *ref,
+                             unsigned int flags);
+#define I915_ACTIVE_AWAIT_ALL BIT(0)
 
 int i915_active_acquire(struct i915_active *ref);
 bool i915_active_acquire_if_busy(struct i915_active *ref);
index 3dde671145f7b3e8670d99fa91e4975552eff602..5b3efb43a8ef51e2da95a52d1363094669dd1f58 100644 (file)
@@ -1173,7 +1173,7 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
        GEM_BUG_ON(!i915_vma_is_pinned(vma));
 
        /* Wait for the vma to be bound before we start! */
-       err = i915_request_await_active(rq, &vma->active);
+       err = i915_request_await_active(rq, &vma->active, 0);
        if (err)
                return err;