rcu_read_unlock();
}
-void i915_sched_engine_free(struct kref *kref)
+static void default_destroy(struct kref *kref)
{
struct i915_sched_engine *sched_engine =
container_of(kref, typeof(*sched_engine), ref);
sched_engine->queue = RB_ROOT_CACHED;
sched_engine->queue_priority_hint = INT_MIN;
+ sched_engine->destroy = default_destroy;
INIT_LIST_HEAD(&sched_engine->requests);
INIT_LIST_HEAD(&sched_engine->hold);
struct i915_sched_engine *
i915_sched_engine_create(unsigned int subclass);
-void i915_sched_engine_free(struct kref *kref);
-
static inline struct i915_sched_engine *
i915_sched_engine_get(struct i915_sched_engine *sched_engine)
{
static inline void
i915_sched_engine_put(struct i915_sched_engine *sched_engine)
{
- kref_put(&sched_engine->ref, i915_sched_engine_free);
+ kref_put(&sched_engine->ref, sched_engine->destroy);
}
static inline bool
*/
void *private_data;
+ /**
+ * @destroy: destroy the scheduling engine and clean up backend state
+ */
+ void (*destroy)(struct kref *kref);
+
/**
* @kick_backend: kick backend after a request's priority has changed
*/