I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
-
- /*
- * Requests containing performance queries must not be preempted by
- * another context. They get scheduled with their default priority and
- * once they reach the execlist ports we ensure that they stick on the
- * HW until finished by pretending that they have maximum priority,
- * i.e. nothing can have higher priority and force us to usurp the
- * active request.
- */
- I915_PRIORITY_UNPREEMPTABLE = INT_MAX,
-
- I915_PRIORITY_INVALID = INT_MIN
};
#define I915_USER_PRIORITY_SHIFT 2
#define I915_PRIORITY_WAIT ((u8)BIT(0))
#define I915_PRIORITY_NOSEMAPHORE ((u8)BIT(1))
+/* Smallest priority value that cannot be bumped. */
+#define I915_PRIORITY_INVALID (INT_MIN | (u8)I915_PRIORITY_MASK)
+
+/*
+ * Requests containing performance queries must not be preempted by
+ * another context. They get scheduled with their default priority and
+ * once they reach the execlist ports we ensure that they stick on the
+ * HW until finished by pretending that they have maximum priority,
+ * i.e. nothing can have higher priority and force us to usurp the
+ * active request.
+ */
+#define I915_PRIORITY_UNPREEMPTABLE INT_MAX
+
#define __NO_PREEMPTION (I915_PRIORITY_WAIT)
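/*
 * Illustrative sketch (not part of the patch): how the user-visible priority
 * and the internal hint bits above are assumed to combine into one effective
 * priority value. Assumes I915_PRIORITY_MASK covers the low
 * I915_USER_PRIORITY_SHIFT bits reserved for the hint flags;
 * effective_priority() is a hypothetical helper, not a driver function.
 * I915_PRIORITY_INVALID and I915_PRIORITY_UNPREEMPTABLE sit outside the range
 * of any value produced this way.
 */
static inline int effective_priority(int user_prio, u8 hint_bits)
{
	/* User priority in the high bits, WAIT/NOSEMAPHORE hints in the low bits. */
	return (user_prio << I915_USER_PRIORITY_SHIFT) |
	       (hint_bits & I915_PRIORITY_MASK);
}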
struct i915_priolist {
list_add(&ring->active_link, &rq->i915->gt.active_rings);
rq->emitted_jiffies = jiffies;
+ return prev;
+}
+
+void __i915_request_queue(struct i915_request *rq,
+ const struct i915_sched_attr *attr)
+{
/*
* Let the backend know a new request has arrived that may need
 * to adjust the existing execution schedule due to a high priority
 * request.
 */
local_bh_disable();
i915_sw_fence_commit(&rq->semaphore);
- if (engine->schedule) {
- struct i915_sched_attr attr = rq->gem_context->sched;
-
- /*
- * Boost actual workloads past semaphores!
- *
- * With semaphores we spin on one engine waiting for another,
- * simply to reduce the latency of starting our work when
- * the signaler completes. However, if there is any other
- * work that we could be doing on this engine instead, that
- * is better utilisation and will reduce the overall duration
- * of the current work. To avoid PI boosting a semaphore
- * far in the distance past over useful work, we keep a history
- * of any semaphore use along our dependency chain.
- */
- if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
- attr.priority |= I915_PRIORITY_NOSEMAPHORE;
-
- /*
- * Boost priorities to new clients (new request flows).
- *
- * Allow interactive/synchronous clients to jump ahead of
- * the bulk clients. (FQ_CODEL)
- */
- if (list_empty(&rq->sched.signalers_list))
- attr.priority |= I915_PRIORITY_WAIT;
-
- engine->schedule(rq, &attr);
- }
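+	/*
+	 * A NULL attr skips the engine->schedule() notification entirely;
+	 * only the semaphore and submit fences are committed.
+	 */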
+ if (attr && rq->engine->schedule)
+ rq->engine->schedule(rq, attr);
i915_sw_fence_commit(&rq->submit);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
-
- return prev;
}
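/*
 * Illustrative sketch (not part of the patch): a caller that must keep a
 * request at its default context priority (e.g. one carrying a performance
 * query, see I915_PRIORITY_UNPREEMPTABLE above) can commit and queue it
 * without an attr, so engine->schedule() is never invoked.
 * submit_without_boost() is a hypothetical helper, not a driver function.
 */
static void submit_without_boost(struct i915_request *rq)
{
	lockdep_assert_held(&rq->timeline->mutex);

	__i915_request_commit(rq);
	__i915_request_queue(rq, NULL);
}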
void i915_request_add(struct i915_request *rq)
{
+ struct i915_sched_attr attr = rq->gem_context->sched;
struct i915_request *prev;
lockdep_assert_held(&rq->timeline->mutex);
prev = __i915_request_commit(rq);
+ /*
+ * Boost actual workloads past semaphores!
+ *
+ * With semaphores we spin on one engine waiting for another,
+ * simply to reduce the latency of starting our work when
+ * the signaler completes. However, if there is any other
+ * work that we could be doing on this engine instead, that
+ * is better utilisation and will reduce the overall duration
+ * of the current work. To avoid PI boosting a semaphore
+ * far in the distance past over useful work, we keep a history
+ * of any semaphore use along our dependency chain.
+ */
+ if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
+ attr.priority |= I915_PRIORITY_NOSEMAPHORE;
+
+ /*
+ * Boost priorities to new clients (new request flows).
+ *
+ * Allow interactive/synchronous clients to jump ahead of
+ * the bulk clients. (FQ_CODEL)
+ */
+ if (list_empty(&rq->sched.signalers_list))
+ attr.priority |= I915_PRIORITY_WAIT;
+
+ __i915_request_queue(rq, &attr);
+
/*
* In typical scenarios, we do not expect the previous request on
* the timeline to be still tracked by timeline->last_request if it