#include "gen8_engine_cs.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
+#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_engine_stats.h"
#include "intel_execlists_submission.h"
return lrc_alloc(ce, ce->engine);
}
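+/*
+ * With execlists, a request that is already on the hardware can only be
+ * cancelled by kicking it off the engine: send a heartbeat pulse to force a
+ * preemption, and fall back to an engine reset if the pulse cannot be sent.
+ */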
+static void execlists_context_cancel_request(struct intel_context *ce,
+ struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = NULL;
+
+ i915_request_active_engine(rq, &engine);
+
+ if (engine && intel_engine_pulse(engine))
+ intel_gt_handle_error(engine->gt, engine->mask, 0,
+ "request cancellation by %s",
+ current->comm);
+}
+
static const struct intel_context_ops execlists_context_ops = {
.flags = COPS_HAS_INFLIGHT,
.alloc = execlists_context_alloc,
+ .cancel_request = execlists_context_cancel_request,
+
.pre_pin = execlists_context_pre_pin,
.pin = execlists_context_pin,
.unpin = lrc_unpin,
.alloc = virtual_context_alloc,
+ .cancel_request = execlists_context_cancel_request,
+
.pre_pin = virtual_context_pre_pin,
.pin = virtual_context_pin,
.unpin = lrc_unpin,
#define SCHED_STATE_DESTROYED BIT(1)
#define SCHED_STATE_PENDING_DISABLE BIT(2)
#define SCHED_STATE_BANNED BIT(3)
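+/*
+ * SCHED_STATE_BLOCKED is a count of outstanding guc_context_block() calls,
+ * stored in bits 4-15 of sched_state. While it is non-zero the submission
+ * path defers running requests; they are submitted once guc_context_unblock()
+ * re-enables scheduling.
+ */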
+#define SCHED_STATE_BLOCKED_SHIFT 4
+#define SCHED_STATE_BLOCKED BIT(SCHED_STATE_BLOCKED_SHIFT)
+#define SCHED_STATE_BLOCKED_MASK (0xfff << SCHED_STATE_BLOCKED_SHIFT)
static inline void init_sched_state(struct intel_context *ce)
{
/* Should only be called from guc_lrc_desc_pin() */
ce->guc_state.sched_state &= ~SCHED_STATE_BANNED;
}
+static inline u32 context_blocked(struct intel_context *ce)
+{
+ return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >>
+ SCHED_STATE_BLOCKED_SHIFT;
+}
+
+static inline void incr_context_blocked(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->engine->sched_engine->lock);
+ lockdep_assert_held(&ce->guc_state.lock);
+
+ ce->guc_state.sched_state += SCHED_STATE_BLOCKED;
+
+ GEM_BUG_ON(!context_blocked(ce)); /* Overflow check */
+}
+
+static inline void decr_context_blocked(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->engine->sched_engine->lock);
+ lockdep_assert_held(&ce->guc_state.lock);
+
+ GEM_BUG_ON(!context_blocked(ce)); /* Underflow check */
+
+ ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
+}
+
static inline bool context_guc_id_invalid(struct intel_context *ce)
{
return ce->guc_id == GUC_INVALID_LRC_ID;
if (unlikely(err))
goto out;
}
+
+ /*
+ * The request / context will be run on the hardware when scheduling
+ * gets re-enabled in guc_context_unblock().
+ */
+ if (unlikely(context_blocked(ce)))
+ goto out;
+
enabled = context_enabled(ce);
if (!enabled) {
static void release_guc_id(struct intel_guc *guc, struct intel_context *ce);
static void guc_signal_context_fence(struct intel_context *ce);
static void guc_cancel_context_requests(struct intel_context *ce);
+static void guc_blocked_fence_complete(struct intel_context *ce);
static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
{
}
intel_context_sched_disable_unpin(ce);
atomic_dec(&guc->outstanding_submission_g2h);
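+ /*
+ * The schedule disable reply is being scrubbed and will never be
+ * processed, so complete the blocked fence here to release any
+ * guc_context_block() waiters.
+ */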
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+ guc_blocked_fence_complete(ce);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
intel_context_put(ce);
}
}
lrc_post_unpin(ce);
}
+static void __guc_context_sched_enable(struct intel_guc *guc,
+ struct intel_context *ce)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
+ ce->guc_id,
+ GUC_CONTEXT_ENABLE
+ };
+
+ trace_intel_context_sched_enable(ce);
+
+ guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
+ G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
+}
+
static void __guc_context_sched_disable(struct intel_guc *guc,
struct intel_context *ce,
u16 guc_id)
G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
}
+static void guc_blocked_fence_complete(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+
+ if (!i915_sw_fence_done(&ce->guc_blocked))
+ i915_sw_fence_complete(&ce->guc_blocked);
+}
+
+static void guc_blocked_fence_reinit(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_blocked));
+
+ /*
+ * This fence is always complete unless a schedule disable is
+ * outstanding. We arm the fence here and complete it when the
+ * schedule disable complete G2H message is received.
+ */
+ i915_sw_fence_fini(&ce->guc_blocked);
+ i915_sw_fence_reinit(&ce->guc_blocked);
+ i915_sw_fence_await(&ce->guc_blocked);
+ i915_sw_fence_commit(&ce->guc_blocked);
+}
+
static u16 prep_context_pending_disable(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
set_context_pending_disable(ce);
clr_context_enabled(ce);
+ guc_blocked_fence_reinit(ce);
intel_context_get(ce);
return ce->guc_id;
}
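+/*
+ * guc_context_block - stop the GuC from scheduling new requests on @ce.
+ *
+ * Bumps the blocked count (checked by the submission path) and, if
+ * scheduling is currently enabled, issues a schedule disable to the GuC.
+ * Returns the guc_blocked fence, which signals once any outstanding
+ * schedule disable has completed.
+ */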
+static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
+{
+ struct intel_guc *guc = ce_to_guc(ce);
+ struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
+ unsigned long flags;
+ struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
+ intel_wakeref_t wakeref;
+ u16 guc_id;
+ bool enabled;
+
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+
+ /*
+ * Sync with the submission path: bump the blocked count before the
+ * context state changes below.
+ */
+ spin_lock(&sched_engine->lock);
+ incr_context_blocked(ce);
+ spin_unlock(&sched_engine->lock);
+
+ enabled = context_enabled(ce);
+ if (unlikely(!enabled || submission_disabled(guc))) {
+ if (enabled)
+ clr_context_enabled(ce);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+ return &ce->guc_blocked;
+ }
+
+ /*
+ * We add +2 here as the schedule disable complete CTB handler calls
+ * intel_context_sched_disable_unpin (-2 to pin_count).
+ */
+ atomic_add(2, &ce->pin_count);
+
+ guc_id = prep_context_pending_disable(ce);
+
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+ with_intel_runtime_pm(runtime_pm, wakeref)
+ __guc_context_sched_disable(guc, ce, guc_id);
+
+ return &ce->guc_blocked;
+}
+
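+/*
+ * guc_context_unblock - drop one guc_context_block() reference on @ce.
+ *
+ * Re-enables GuC scheduling only when this was the last outstanding block,
+ * the context is still pinned, no schedule disable is pending and submission
+ * is not disabled.
+ */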
+static void guc_context_unblock(struct intel_context *ce)
+{
+ struct intel_guc *guc = ce_to_guc(ce);
+ struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
+ unsigned long flags;
+ struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
+ intel_wakeref_t wakeref;
+ bool enable;
+
+ GEM_BUG_ON(context_enabled(ce));
+
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+
+ if (unlikely(submission_disabled(guc) ||
+ !intel_context_is_pinned(ce) ||
+ context_pending_disable(ce) ||
+ context_blocked(ce) > 1)) {
+ enable = false;
+ } else {
+ enable = true;
+ set_context_pending_enable(ce);
+ set_context_enabled(ce);
+ intel_context_get(ce);
+ }
+
+ /*
+ * Sync with the submission path: drop the blocked count after the
+ * context state changes above.
+ */
+ spin_lock(&sched_engine->lock);
+ decr_context_blocked(ce);
+ spin_unlock(&sched_engine->lock);
+
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+ if (enable) {
+ with_intel_runtime_pm(runtime_pm, wakeref)
+ __guc_context_sched_enable(guc, ce);
+ }
+}
+
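+/*
+ * Cancel a request that has already been submitted to the GuC: block the
+ * context so the GuC stops scheduling it, wait for the blocked fence, and
+ * if the request still has not completed, no-op its payload and reset the
+ * context state before unblocking the context again.
+ */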
+static void guc_context_cancel_request(struct intel_context *ce,
+ struct i915_request *rq)
+{
+ if (i915_sw_fence_signaled(&rq->submit)) {
+ struct i915_sw_fence *fence = guc_context_block(ce);
+
+ i915_sw_fence_wait(fence);
+ if (!i915_request_completed(rq)) {
+ __i915_request_skip(rq);
+ guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head),
+ true);
+ }
+ guc_context_unblock(ce);
+ }
+}
+
static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
u16 guc_id,
u32 preemption_timeout)
.ban = guc_context_ban,
+ .cancel_request = guc_context_cancel_request,
+
.enter = intel_context_enter_engine,
.exit = intel_context_exit_engine,
.ban = guc_context_ban,
+ .cancel_request = guc_context_cancel_request,
+
.enter = guc_virtual_context_enter,
.exit = guc_virtual_context_exit,
clr_context_banned(ce);
clr_context_pending_disable(ce);
__guc_signal_context_fence(ce);
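+ /* The schedule disable is done: release guc_context_block() waiters. */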
+ guc_blocked_fence_complete(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
if (banned) {