drm/i915/gt: Check carefully for an idle engine in wait-for-idle
Author:     Chris Wilson <chris@chris-wilson.co.uk>
AuthorDate: Thu, 23 Apr 2020 08:59:39 +0000 (09:59 +0100)
Commit:     Chris Wilson <chris@chris-wilson.co.uk>
CommitDate: Thu, 23 Apr 2020 15:16:32 +0000 (16:16 +0100)
intel_gt_wait_for_idle() tries to wait until all the outstanding requests
are retired and the GPU is idle. As a side effect of retiring requests,
we may submit more work to flush any pm barriers, and so the
wait-for-idle tries to flush the background pm work and catch the new
requests. However, if the work completed in the background before we
were able to flush, it would queue the extra barrier request without us
noticing -- and so we would return from wait-for-idle with one request
remaining. (This breaks e.g. record_default_state where we need to wait
until that barrier is retired, and it may slow suspend down by causing
us to wait on the background retirement worker as opposed to immediately
retiring the barrier.)
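
For reference, a minimal sketch of the pre-patch loop in flush_submission(),
reconstructed from the removed lines in the hunk below (surrounding context
elided):

	for_each_engine(engine, gt, id) {
		intel_engine_flush_submission(engine);
		/*
		 * flush_work()/flush_delayed_work() only return true if
		 * there was pending work to wait for; if the retire worker
		 * had already run and queued the idle barrier, both return
		 * false and that extra request goes unreported.
		 */
		active |= flush_work(&engine->retire_work);
		active |= flush_delayed_work(&engine->wakeref.work);
	}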

However, since we track if there has been a submission since the engine
pm barrier, we can very quickly detect if the idle barrier is still
outstanding.
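
With the patch, flush_submission() no longer relies on the return values of
the flushes; after flushing, it inspects the engine's kernel context timeline
directly, so a barrier request queued before the flush is still reported. A
sketch of that check, assembled from the hunks below:

	static bool engine_active(const struct intel_engine_cs *engine)
	{
		/* Is any request, e.g. the idle barrier, still queued? */
		return !list_empty(&engine->kernel_context->timeline->requests);
	}

	/* ...after flushing each engine... */
	active |= engine_active(engine);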

Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/1763
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200423085940.28168-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gt/intel_gt_requests.c

index dec96a731a77c4bf2f94c029499ed7375614bebd..16ff47c83bd577f5764bccea4a94dfa062c8f460 100644
@@ -26,6 +26,11 @@ static bool retire_requests(struct intel_timeline *tl)
        return !i915_active_fence_isset(&tl->last_request);
 }
 
+static bool engine_active(const struct intel_engine_cs *engine)
+{
+       return !list_empty(&engine->kernel_context->timeline->requests);
+}
+
 static bool flush_submission(struct intel_gt *gt)
 {
        struct intel_engine_cs *engine;
@@ -37,8 +42,13 @@ static bool flush_submission(struct intel_gt *gt)
 
        for_each_engine(engine, gt, id) {
                intel_engine_flush_submission(engine);
-               active |= flush_work(&engine->retire_work);
-               active |= flush_delayed_work(&engine->wakeref.work);
+
+               /* Flush the background retirement and idle barriers */
+               flush_work(&engine->retire_work);
+               flush_delayed_work(&engine->wakeref.work);
+
+               /* Is the idle barrier still outstanding? */
+               active |= engine_active(engine);
        }
 
        return active;
@@ -173,7 +183,6 @@ out_active: spin_lock(&timelines->lock);
                if (atomic_dec_and_test(&tl->active_count))
                        list_del(&tl->link);
 
-
                /* Defer the final release to after the spinlock */
                if (refcount_dec_and_test(&tl->kref.refcount)) {
                        GEM_BUG_ON(atomic_read(&tl->active_count));