 	return err;
 }
 
-void i915_gem_context_release(struct kref *ref)
+static void i915_gem_context_release_work(struct work_struct *work)
 {
-	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
+	struct i915_gem_context *ctx = container_of(work, typeof(*ctx),
+						    release_work);
 
 	trace_i915_context_free(ctx);
 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 	kfree_rcu(ctx, rcu);
 }
 
+void i915_gem_context_release(struct kref *ref)
+{
+	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
+
+	queue_work(ctx->i915->wq, &ctx->release_work);
+}
+
 static inline struct i915_gem_engines *
 __context_engines_static(const struct i915_gem_context *ctx)
 {
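
Taken together, the hunk above is the standard pattern for getting a kref release out of atomic context: the release callback, which can fire from whatever context happens to drop the last reference, does nothing but queue a work item, and the teardown that may sleep runs later in process context. A minimal sketch of the same shape, with hypothetical foo_* names standing in for the i915 types:

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo {
	struct kref ref;
	struct work_struct release_work;
};

static void foo_release_work(struct work_struct *work)
{
	struct foo *foo = container_of(work, typeof(*foo), release_work);

	/* Process context: teardown that may sleep (mutexes, RCU, ...) is safe. */
	kfree(foo);
}

static void foo_release(struct kref *ref)
{
	struct foo *foo = container_of(ref, typeof(*ref), ref);

	/* May be invoked from hardirq context; only queue, never sleep. */
	queue_work(system_wq, &foo->release_work);
}

static void foo_put(struct foo *foo)
{
	kref_put(&foo->ref, foo_release);
}

The sketch queues onto system_wq for simplicity; the patch itself uses the driver's own ctx->i915->wq, presumably so pending releases stay tied to the workqueue the driver already drains on teardown.
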
 	ctx->sched = pc->sched;
 	mutex_init(&ctx->mutex);
 	INIT_LIST_HEAD(&ctx->link);
+	INIT_WORK(&ctx->release_work, i915_gem_context_release_work);
 
 	spin_lock_init(&ctx->stale.lock);
 	INIT_LIST_HEAD(&ctx->stale.engines);
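
One ordering detail the hunk above takes care of: the work item must be initialized before the context is published anywhere that could drop the last reference. Continuing the hypothetical foo sketch, creation would look like:

static struct foo *foo_create(void)
{
	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return NULL;

	kref_init(&foo->ref);
	/* Set up before the object escapes anywhere a foo_put() could run. */
	INIT_WORK(&foo->release_work, foo_release_work);

	return foo;
}
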
 	 */
 	struct kref ref;
 
+	/**
+	 * @release_work:
+	 *
+	 * Work item for deferred cleanup, since i915_gem_context_put() tends to
+	 * be called from hardirq context.
+	 *
+	 * FIXME: The only real reason for this is &i915_gem_engines.fence, all
+	 * other callers are from process context and need at most some mild
+	 * shuffling to pull the i915_gem_context_put() call out of a spinlock.
+	 */
+	struct work_struct release_work;
+
 	/**
 	 * @rcu: rcu_head for deferred freeing.
 	 */
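
Regarding the FIXME: callbacks installed with dma_fence_add_callback(), like the &i915_gem_engines.fence user referenced there, typically run under the fence spinlock straight from the interrupt handler that signals the fence, so a final i915_gem_context_put() from that path would previously have executed the whole release in hardirq context. A self-contained illustration of such a caller, with invented bar_* names:

#include <linux/dma-fence.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct bar {
	struct kref ref;
	struct dma_fence_cb cb;
};

static void bar_release(struct kref *ref)
{
	/* Must stay hardirq-safe, or defer to a worker as in this patch. */
	kfree(container_of(ref, struct bar, ref));
}

/*
 * Fence callbacks run under the fence spinlock, usually directly from the
 * interrupt handler that signals the fence.
 */
static void bar_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct bar *bar = container_of(cb, typeof(*bar), cb);

	/* If this drops the last reference, bar_release() runs in hardirq. */
	kref_put(&bar->ref, bar_release);
}
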