#if IS_ENABLED(CONFIG_LOCKDEP)
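+/*
+ * These helpers only annotate lockdep; no mutex is really taken. The
+ * change below keeps interrupts disabled from mutex_acquire() until
+ * mutex_release(), so lockdep never sees irqs re-enabled inside the
+ * fake critical section and does not mark the pretend lock irq-unsafe.
+ */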
-static inline void __timeline_mark_lock(struct intel_context *ce)
+static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
{
unsigned long flags;
local_irq_save(flags);
mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);
- local_irq_restore(flags);
+
+ return flags;
}
-static inline void __timeline_mark_unlock(struct intel_context *ce)
+static inline void __timeline_mark_unlock(struct intel_context *ce,
+ unsigned long flags)
{
mutex_release(&ce->timeline->mutex.dep_map, 0, _THIS_IP_);
+ local_irq_restore(flags);
}
#else
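+/* Without lockdep the annotation is a no-op; return a dummy 0 for the flags. */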
-static inline void __timeline_mark_lock(struct intel_context *ce)
+static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
{
+ return 0;
}
-static inline void __timeline_mark_unlock(struct intel_context *ce)
+static inline void __timeline_mark_unlock(struct intel_context *ce,
+ unsigned long flags)
{
}

#endif /* !IS_ENABLED(CONFIG_LOCKDEP) */
static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
struct i915_request *rq;
+ unsigned long flags;
+ bool result = true;
/* Already inside the kernel context, safe to power down. */
if (engine->wakeref_serial == engine->serial)
return true;
/*
 * ...
 * retiring the last request, thus all rings should be empty and
* all timelines idle.
*/
- __timeline_mark_lock(engine->kernel_context);
+ flags = __timeline_mark_lock(engine->kernel_context);
rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
if (IS_ERR(rq))
/* Context switch failed, hope for the best! Maybe reset? */
- return true;
+ goto out_unlock;
intel_timeline_enter(rq->timeline);
__intel_wakeref_defer_park(&engine->wakeref);
__i915_request_queue(rq, NULL);
- __timeline_mark_unlock(engine->kernel_context);
-
- return false;
+ result = false;
+out_unlock:
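+ /* success or error, restore irqs and release the lockdep annotation */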
+ __timeline_mark_unlock(engine->kernel_context, flags);
+ return result;
}
static int __engine_park(struct intel_wakeref *wf)
{
struct intel_gt_timelines *timelines = &gt->timelines;
struct intel_timeline *tl;
+ unsigned long flags;
if (!test_bit(I915_WEDGED, &gt->reset.flags))
return true;
/*
* ...
*
* No more can be submitted until we reset the wedged bit.
*/
- spin_lock(&timelines->lock);
+ spin_lock_irqsave(&timelines->lock, flags);
list_for_each_entry(tl, &timelines->active_list, link) {
struct i915_request *rq;
rq = i915_active_request_get_unlocked(&tl->last_request);
if (!rq)
continue;
- spin_unlock(&timelines->lock);
+ spin_unlock_irqrestore(&timelines->lock, flags);
/*
* All internal dependencies (i915_requests) will have
* ...
*/
i915_request_put(rq);
/* Restart iteration after dropping lock */
- spin_lock(&timelines->lock);
+ spin_lock_irqsave(&timelines->lock, flags);
tl = list_entry(&timelines->active_list, typeof(*tl), link);
}
- spin_unlock(&timelines->lock);
+ spin_unlock_irqrestore(&timelines->lock, flags);
intel_gt_sanitize(gt, false);
void intel_timeline_enter(struct intel_timeline *tl)
{
struct intel_gt_timelines *timelines = &tl->gt->timelines;
+ unsigned long flags;
lockdep_assert_held(&tl->mutex);
if (tl->active_count++)
return;
GEM_BUG_ON(!tl->active_count); /* overflow? */
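+ /*
+ * This can now be reached with irqs already off (the engine-park path
+ * queues its final request inside the irq-off fake timeline lock), so
+ * the irqsave variants are needed to preserve the caller's irq state.
+ */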
- spin_lock(&timelines->lock);
+ spin_lock_irqsave(&timelines->lock, flags);
list_add(&tl->link, &timelines->active_list);
- spin_unlock(&timelines->lock);
+ spin_unlock_irqrestore(&timelines->lock, flags);
}
void intel_timeline_exit(struct intel_timeline *tl)
{
struct intel_gt_timelines *timelines = &tl->gt->timelines;
+ unsigned long flags;
lockdep_assert_held(&tl->mutex);
if (--tl->active_count)
return;
- spin_lock(&timelines->lock);
+ spin_lock_irqsave(&timelines->lock, flags);
list_del(&tl->link);
- spin_unlock(&timelines->lock);
+ spin_unlock_irqrestore(&timelines->lock, flags);
/*
* Since this timeline is idle, all barriers upon which we were waiting
static long
wait_for_timelines(struct drm_i915_private *i915,
- unsigned int flags, long timeout)
+ unsigned int wait, long timeout)
{
struct intel_gt_timelines *timelines = &i915->gt.timelines;
struct intel_timeline *tl;
+ unsigned long flags;
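+ /* the wait-mode bitmask is renamed "wait" so "flags" can hold irq state */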
- spin_lock(&timelines->lock);
+ spin_lock_irqsave(&timelines->lock, flags);
list_for_each_entry(tl, &timelines->active_list, link) {
struct i915_request *rq;
rq = i915_active_request_get_unlocked(&tl->last_request);
if (!rq)
continue;
- spin_unlock(&timelines->lock);
+ spin_unlock_irqrestore(&timelines->lock, flags);
/*
* "Race-to-idle".
* want to complete as quickly as possible to avoid prolonged
* stalls, so allow the gpu to boost to maximum clocks.
*/
- if (flags & I915_WAIT_FOR_IDLE_BOOST)
+ if (wait & I915_WAIT_FOR_IDLE_BOOST)
gen6_rps_boost(rq);
- timeout = i915_request_wait(rq, flags, timeout);
+ timeout = i915_request_wait(rq, wait, timeout);
i915_request_put(rq);
if (timeout < 0)
return timeout;
/* restart after reacquiring the lock */
- spin_lock(&timelines->lock);
+ spin_lock_irqsave(&timelines->lock, flags);
tl = list_entry(&timelines->active_list, typeof(*tl), link);
}
- spin_unlock(&timelines->lock);
+ spin_unlock_irqrestore(&timelines->lock, flags);
return timeout;
}
void i915_retire_requests(struct drm_i915_private *i915)
{
struct intel_gt_timelines *timelines = &i915->gt.timelines;
struct intel_timeline *tl, *tn;
+ unsigned long flags;
LIST_HEAD(free);
- spin_lock(&timelines->lock);
+ spin_lock_irqsave(&timelines->lock, flags);
list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
if (!mutex_trylock(&tl->mutex))
continue;
intel_timeline_get(tl);
GEM_BUG_ON(!tl->active_count);
tl->active_count++; /* pin the list element */
- spin_unlock(&timelines->lock);
+ spin_unlock_irqrestore(&timelines->lock, flags);
retire_requests(tl);
- spin_lock(&timelines->lock);
+ spin_lock_irqsave(&timelines->lock, flags);
/* Resume iteration after dropping lock */
list_safe_reset_next(tl, tn, link);
list_add(&tl->link, &free);
}
}
- spin_unlock(&timelines->lock);
+ spin_unlock_irqrestore(&timelines->lock, flags);
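+ /* free the timelines collected above, now outside the irq-off lock */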
list_for_each_entry_safe(tl, tn, &free, link)
__intel_timeline_free(&tl->kref);