drm/i915/gem: Refine occupancy test in kill_context()
author    Chris Wilson <chris@chris-wilson.co.uk>
          Thu, 31 Oct 2019 09:01:04 +0000 (09:01 +0000)
committer Chris Wilson <chris@chris-wilson.co.uk>
          Fri, 1 Nov 2019 09:44:48 +0000 (09:44 +0000)
Don't just look at the very last request in a queue when deciding if we
need to evict the context from the GPU, as that request may still be in
the submission queue while the rest of the context is running!

Instead, walk back along the queued requests looking for the active
request and checking that.
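
To make the failure mode concrete, here is a minimal user-space model
(the types and helpers are simplified stand-ins, not the i915
structures) contrasting the old tail-only test with the new reverse
walk:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct request {
	bool completed;	/* retired: the GPU is done with it */
	bool inflight;	/* currently executing on an engine */
};

/* Old behaviour: only the last request in the timeline is examined. */
static bool active_tail_only(const struct request *rq, size_t n)
{
	return n > 0 && rq[n - 1].inflight;
}

/* New behaviour: walk back until a completed request ends the scan. */
static bool active_walk_back(const struct request *rq, size_t n)
{
	while (n--) {
		if (rq[n].completed)
			break;
		if (rq[n].inflight)
			return true;
	}
	return false;
}

int main(void)
{
	/* rq[1] is running on the GPU; rq[2] is still queued. */
	const struct request timeline[] = {
		{ .completed = true },
		{ .inflight = true },
		{ 0 },
	};
	const size_t n = sizeof(timeline) / sizeof(timeline[0]);

	printf("tail-only sees active: %d\n", active_tail_only(timeline, n)); /* 0: wrongly idle */
	printf("walk-back sees active: %d\n", active_walk_back(timeline, n)); /* 1: correctly active */
	return 0;
}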

Fixes: a16f09a47227 ("drm/i915/gem: Cancel contexts when hangchecking is disabled")
Testcase: igt/gem_ctx_persistence/queued
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191031090104.22245-1-chris@chris-wilson.co.uk
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index cbdf2fb326361675e9d88ad6896b250624de4dca..de6e55af82cf71a49edf2f4b9538d207e6cb2e5d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -333,10 +333,8 @@ static bool __cancel_engine(struct intel_engine_cs *engine)
        return __reset_engine(engine);
 }
 
-static struct intel_engine_cs *
-active_engine(struct dma_fence *fence, struct intel_context *ce)
+static struct intel_engine_cs *__active_engine(struct i915_request *rq)
 {
-       struct i915_request *rq = to_request(fence);
        struct intel_engine_cs *engine, *locked;
 
        /*
@@ -360,6 +358,29 @@ active_engine(struct dma_fence *fence, struct intel_context *ce)
        return engine;
 }
 
+static struct intel_engine_cs *active_engine(struct intel_context *ce)
+{
+       struct intel_engine_cs *engine = NULL;
+       struct i915_request *rq;
+
+       if (!ce->timeline)
+               return NULL;
+
+       rcu_read_lock();
+       list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
+               if (i915_request_completed(rq))
+                       break;
+
+               /* Check with the backend if the request is inflight */
+               engine = __active_engine(rq);
+               if (engine)
+                       break;
+       }
+       rcu_read_unlock();
+
+       return engine;
+}
+
 static void kill_context(struct i915_gem_context *ctx)
 {
        struct i915_gem_engines_iter it;
@@ -383,17 +404,15 @@ static void kill_context(struct i915_gem_context *ctx)
         */
        for_each_gem_engine(ce, __context_engines_static(ctx), it) {
                struct intel_engine_cs *engine;
-               struct dma_fence *fence;
-
-               if (!ce->timeline)
-                       continue;
 
-               fence = i915_active_fence_get(&ce->timeline->last_request);
-               if (!fence)
-                       continue;
-
-               /* Check with the backend if the request is still inflight */
-               engine = active_engine(fence, ce);
+               /*
+                * Check the current active state of this context; if we
+                * are currently executing on the GPU we need to evict
+                * ourselves. On the other hand, if we haven't yet been
+                * submitted to the GPU or if everything is complete,
+                * we have nothing to do.
+                */
+               engine = active_engine(ce);
 
                /* First attempt to gracefully cancel the context */
                if (engine && !__cancel_engine(engine))
@@ -403,8 +422,6 @@ static void kill_context(struct i915_gem_context *ctx)
                         * reset. We hope the collateral damage is worth it.
                         */
                        __reset_context(ctx, engine);
-
-               dma_fence_put(fence);
        }
 }
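
The escalation path retained in kill_context() above is the other half
of the story: once active_engine() finds a context executing, a
graceful cancel is attempted first and a full reset is the hammer of
last resort. A small user-space model of that control flow, with
try_cancel_engine() and reset_context() as hypothetical stand-ins for
__cancel_engine() and __reset_context():

#include <stdbool.h>
#include <stdio.h>

struct engine { const char *name; };

/* Stand-in: report whether a graceful cancel of the context succeeded. */
static bool try_cancel_engine(const struct engine *e)
{
	printf("cancel on %s: failed\n", e->name);
	return false;	/* pretend the polite route was refused */
}

/* Stand-in: the last-resort reset, collateral damage accepted. */
static void reset_context(const struct engine *e)
{
	printf("reset on %s\n", e->name);
}

static void kill_context_model(const struct engine *active)
{
	/* First attempt to gracefully cancel the context ... */
	if (active && !try_cancel_engine(active))
		/* ... otherwise escalate to a full reset. */
		reset_context(active);
}

int main(void)
{
	const struct engine rcs0 = { "rcs0" };
	kill_context_model(&rcs0);
	return 0;
}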