 	return err;
 }
 
+static inline void guc_set_lrc_tail(struct i915_request *rq)
+{
+	rq->context->lrc_reg_state[CTX_RING_TAIL] =
+		intel_ring_set_tail(rq->ring, rq->tail);
+}
+
 static inline int rq_prio(const struct i915_request *rq)
 {
 	return rq->sched.attr.priority;
 }
 done:
 	if (submit) {
-		last->context->lrc_reg_state[CTX_RING_TAIL] =
-			intel_ring_set_tail(last->ring, last->tail);
+		guc_set_lrc_tail(last);
 resubmit:
 		/*
 		 * We only check for -EBUSY here even though it is possible for
 	set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 }
 
+/*
+ * Submit a request to the GuC directly, bypassing the tasklet. If the GuC is
+ * busy (-EBUSY), remember the request as the stalled request so the tasklet
+ * can retry it later.
+ */
+static int guc_bypass_tasklet_submit(struct intel_guc *guc,
+				     struct i915_request *rq)
+{
+	int ret;
+
+	__i915_request_submit(rq);
+
+	trace_i915_request_in(rq, 0);
+
+	guc_set_lrc_tail(rq);
+	ret = guc_add_request(guc, rq);
+	if (ret == -EBUSY)
+		guc->stalled_request = rq;
+
+	return ret;
+}
+
 static void guc_submit_request(struct i915_request *rq)
 {
 	struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
+	struct intel_guc *guc = &rq->engine->gt->uc.guc;
 	unsigned long flags;
 
 	/* Will be called from irq-context when using foreign fences. */
 	spin_lock_irqsave(&sched_engine->lock, flags);
 
-	queue_request(sched_engine, rq, rq_prio(rq));
-
-	GEM_BUG_ON(i915_sched_engine_is_empty(sched_engine));
-	GEM_BUG_ON(list_empty(&rq->sched.link));
-
-	tasklet_hi_schedule(&sched_engine->tasklet);
+	/* Queue behind stalled or pending work, otherwise submit directly */
+	if (guc->stalled_request || !i915_sched_engine_is_empty(sched_engine))
+		queue_request(sched_engine, rq, rq_prio(rq));
+	else if (guc_bypass_tasklet_submit(guc, rq) == -EBUSY)
+		tasklet_hi_schedule(&sched_engine->tasklet);
 
 	spin_unlock_irqrestore(&sched_engine->lock, flags);
 }