intel_uc_sanitize(>->uc);
- if (!reset_engines(gt) && !force)
- return;
+ for_each_engine(engine, gt->i915, id)
+ if (engine->reset.prepare)
+ engine->reset.prepare(engine);
+
+ if (reset_engines(gt) || force) {
+ for_each_engine(engine, gt->i915, id)
+ __intel_engine_reset(engine, false);
+ }
for_each_engine(engine, gt->i915, id)
- __intel_engine_reset(engine, false);
+ if (engine->reset.finish)
+ engine->reset.finish(engine);
}
void intel_gt_pm_disable(struct intel_gt *gt)
const u8 num_entries = execlists->csb_size;
u8 head, tail;
+ /*
+ * As we modify our execlists state tracking we require exclusive
+ * access. Either we are inside the tasklet, or the tasklet is disabled
+ * and we assume that is only inside the reset paths and so serialised.
+ */
+ GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
+ !reset_in_progress(execlists));
GEM_BUG_ON(USES_GUC_SUBMISSION(engine->i915));
/*
cpu_relax();
}
+/*
+ * Report whether the tasklet is currently executing: TASKLET_STATE_RUN is
+ * set while a tasklet's callback is running on some CPU. Used by callers
+ * (e.g. the process_csb GEM_BUG_ON in this patch) to assert they either
+ * hold the tasklet's run "lock" or have otherwise serialised against it.
+ * NOTE(review): this is a point-in-time test of the state bit, not an
+ * acquisition — presumably only valid as a debug assertion; confirm.
+ */
+static inline bool tasklet_is_locked(const struct tasklet_struct *t)
+{
+	return test_bit(TASKLET_STATE_RUN, &t->state);
+}
+
static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
{
if (!atomic_fetch_inc(&t->count))