return !i915_active_fence_isset(&tl->last_request);
}
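
+/*
+ * Treat the engine as active while requests (such as the idle barrier)
+ * remain queued on its kernel context timeline.
+ */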
+static bool engine_active(const struct intel_engine_cs *engine)
+{
+ return !list_empty(&engine->kernel_context->timeline->requests);
+}
+
static bool flush_submission(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
bool active = false;

for_each_engine(engine, gt, id) {
intel_engine_flush_submission(engine);
- active |= flush_work(&engine->retire_work);
- active |= flush_delayed_work(&engine->wakeref.work);
+
+ /* Flush the background retirement and idle barriers */
+ flush_work(&engine->retire_work);
+ flush_delayed_work(&engine->wakeref.work);
+
+ /* Is the idle barrier still outstanding? */
+ active |= engine_active(engine);
}
return active;
}
if (atomic_dec_and_test(&tl->active_count))
list_del(&tl->link);
-
/* Defer the final release to after the spinlock */
if (refcount_dec_and_test(&tl->kref.refcount)) {
GEM_BUG_ON(atomic_read(&tl->active_count));