spin_unlock(&ctx->hw_contexts_lock);
}
-static struct intel_context *
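+/* Returns the HW context with a reference held; balance with intel_context_put() */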
+struct intel_context *
intel_context_instance(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
struct intel_context *ce, *pos;
ce = intel_context_lookup(ctx, engine);
if (likely(ce))
- return ce;
+ return intel_context_get(ce);
ce = intel_context_alloc();
if (!ce)
return ERR_PTR(-ENOMEM);
intel_context_init(ce, ctx, engine);
pos = __intel_context_insert(ctx, engine, ce);
if (unlikely(pos != ce)) /* beaten! use the existing HW context */
intel_context_free(ce);
GEM_BUG_ON(intel_context_lookup(ctx, engine) != pos);
- return pos;
+ return intel_context_get(pos);
}
struct intel_context *
intel_context_pin_lock(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
__acquires(ce->pin_mutex)
{
struct intel_context *ce;
ce = intel_context_instance(ctx, engine);
if (IS_ERR(ce))
return ce;
- if (mutex_lock_interruptible(&ce->pin_mutex))
+ if (mutex_lock_interruptible(&ce->pin_mutex)) {
+ intel_context_put(ce);
return ERR_PTR(-EINTR);
+ }
return ce;
}
-struct intel_context *
-intel_context_pin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
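+/* Releases the pin_mutex and drops the reference taken by intel_context_pin_lock() */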
+void intel_context_pin_unlock(struct intel_context *ce)
+ __releases(ce->pin_mutex)
{
- struct intel_context *ce;
- int err;
-
- ce = intel_context_instance(ctx, engine);
- if (IS_ERR(ce))
- return ce;
+ mutex_unlock(&ce->pin_mutex);
+ intel_context_put(ce);
+}
- if (likely(atomic_inc_not_zero(&ce->pin_count)))
- return ce;
+int __intel_context_do_pin(struct intel_context *ce)
+{
+ int err;
if (mutex_lock_interruptible(&ce->pin_mutex))
- return ERR_PTR(-EINTR);
+ return -EINTR;
if (likely(!atomic_read(&ce->pin_count))) {
+ struct i915_gem_context *ctx = ce->gem_context;
intel_wakeref_t wakeref;
err = 0;
with_intel_runtime_pm(ce->engine->i915, wakeref)
err = ce->ops->pin(ce);
if (err)
goto err;
i915_gem_context_get(ctx);
- GEM_BUG_ON(ce->gem_context != ctx);
mutex_lock(&ctx->mutex);
list_add(&ce->active_link, &ctx->active_engines);
mutex_unlock(&ctx->mutex);
intel_context_get(ce); /* a pinned context holds its own reference */
smp_mb__before_atomic(); /* flush pin before it is visible */
}
atomic_inc(&ce->pin_count);
GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
mutex_unlock(&ce->pin_mutex);
- return ce;
+ return 0;
err:
mutex_unlock(&ce->pin_mutex);
- return ERR_PTR(err);
+ return err;
}
void intel_context_unpin(struct intel_context *ce)
static inline bool
intel_context_is_pinned(struct intel_context *ce)
{
return atomic_read(&ce->pin_count);
}
-static inline void intel_context_pin_unlock(struct intel_context *ce)
-__releases(ce->pin_mutex)
-{
- mutex_unlock(&ce->pin_mutex);
-}
+void intel_context_pin_unlock(struct intel_context *ce);
struct intel_context *
__intel_context_insert(struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
struct intel_context *ce);
void
__intel_context_remove(struct intel_context *ce);
struct intel_context *
-intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
+intel_context_instance(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine);
+
+int __intel_context_do_pin(struct intel_context *ce);
+
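+/*
+ * Fast path: if the context is already pinned, just bump pin_count;
+ * otherwise fall back to the mutex-taking slow path that performs
+ * the actual HW pinning.
+ */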
+static inline int intel_context_pin(struct intel_context *ce)
+{
+ if (likely(atomic_inc_not_zero(&ce->pin_count)))
+ return 0;
+
+ return __intel_context_do_pin(ce);
+}
static inline void __intel_context_pin(struct intel_context *ce)
{
struct intel_context **out)
{
struct intel_context *ce;
+ int err;
- ce = intel_context_pin(ctx, engine);
+ ce = intel_context_instance(ctx, engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
+ err = intel_context_pin(ce);
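+ /* a successful pin holds its own references, so drop the lookup reference */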
+ intel_context_put(ce);
+ if (err)
+ return err;
+
*out = ce;
return 0;
}
int id)
{
struct mock_engine *engine;
+ int err;
GEM_BUG_ON(id >= I915_NUM_ENGINES);
INIT_LIST_HEAD(&engine->hw_queue);
engine->base.kernel_context =
- intel_context_pin(i915->kernel_context, &engine->base);
+ intel_context_instance(i915->kernel_context, &engine->base);
if (IS_ERR(engine->base.kernel_context))
goto err_breadcrumbs;
+ err = intel_context_pin(engine->base.kernel_context);
+ intel_context_put(engine->base.kernel_context);
+ if (err)
+ goto err_breadcrumbs;
+
return &engine->base;
err_breadcrumbs:
INIT_LIST_HEAD(&s->workload_q_head[i]);
s->shadow[i] = ERR_PTR(-EINVAL);
- ce = intel_context_pin(ctx, engine);
+ ce = intel_context_instance(ctx, engine);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
goto out_shadow_ctx;
}
+ ret = intel_context_pin(ce);
+ intel_context_put(ce);
+ if (ret)
+ goto out_shadow_ctx;
+
s->shadow[i] = ce;
}
if (err)
return err;
+ ce = intel_context_instance(eb->gem_context, engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
/*
* Pinning the contexts may generate requests in order to acquire
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
- ce = intel_context_pin(eb->gem_context, engine);
- if (IS_ERR(ce))
- return PTR_ERR(ce);
+ err = intel_context_pin(ce);
+ intel_context_put(ce);
+ if (err)
+ return err;
eb->engine = engine;
eb->context = ce;
{
struct intel_engine_cs *engine = i915->engine[RCS0];
struct intel_context *ce;
- int ret;
+ int err;
- ret = i915_mutex_lock_interruptible(&i915->drm);
- if (ret)
- return ERR_PTR(ret);
+ ce = intel_context_instance(ctx, engine);
+ if (IS_ERR(ce))
+ return ce;
+
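+ /* the lookup itself does not need struct_mutex; only the pin below does */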
+ err = i915_mutex_lock_interruptible(&i915->drm);
+ if (err) {
+ intel_context_put(ce);
+ return ERR_PTR(err);
+ }
/*
* As the ID is the gtt offset of the context's vma we
*
* NB: implied RCS engine...
*/
- ce = intel_context_pin(ctx, engine);
+ err = intel_context_pin(ce);
mutex_unlock(&i915->drm.struct_mutex);
- if (IS_ERR(ce))
- return ce;
+ intel_context_put(ce);
+ if (err)
+ return ERR_PTR(err);
i915->perf.oa.pinned_ctx = ce;
struct drm_i915_private *i915 = engine->i915;
struct intel_context *ce;
struct i915_request *rq;
+ int err;
/*
* Preempt contexts are reserved for exclusive use to inject a
* preemption context switch. They are never to be used for any trivial
* request!
*/
GEM_BUG_ON(ctx == i915->preempt_context);
/*
* Pinning the contexts may generate requests in order to acquire
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
- ce = intel_context_pin(ctx, engine);
+ ce = intel_context_instance(ctx, engine);
if (IS_ERR(ce))
return ERR_CAST(ce);
+ err = intel_context_pin(ce);
+ if (err) {
+ rq = ERR_PTR(err);
+ goto err_put;
+ }
+
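+ /* both success and error paths drop the lookup reference at err_put */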
rq = i915_request_create(ce);
intel_context_unpin(ce);
+err_put:
+ intel_context_put(ce);
return rq;
}