return NOTIFY_DONE;
}
-/**
- * i915_gem_shrinker_register - Register the i915 shrinker
- * @i915: i915 device
- *
- * This function registers and sets up the i915 shrinker and OOM handler.
- */
-void i915_gem_shrinker_register(struct drm_i915_private *i915)
+void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
{
i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier));
}
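For readers outside i915: the hunk above shows only the scan/count hooks being wired up; kernels of this era also set seeks, call register_shrinker(), and install the OOM and vmap-purge notifiers that the unregister path below tears down. A minimal, self-contained sketch of that pre-v6.0 shrinker pattern (all example_* names are hypothetical, not part of this patch):

	#include <linux/shrinker.h>

	/* Hypothetical shrinker: reports nothing freeable, frees nothing. */
	static unsigned long example_count(struct shrinker *shrinker,
					   struct shrink_control *sc)
	{
		return 0;		/* objects we could free right now */
	}

	static unsigned long example_scan(struct shrinker *shrinker,
					  struct shrink_control *sc)
	{
		return SHRINK_STOP;	/* nothing reclaimed in this sketch */
	}

	static struct shrinker example_shrinker = {
		.count_objects	= example_count,
		.scan_objects	= example_scan,
		.seeks		= DEFAULT_SEEKS,
	};

	/* register_shrinker() took only the shrinker until v6.0; newer
	 * kernels add a name argument, and v6.7 reworks the API again:
	 *
	 *	WARN_ON(register_shrinker(&example_shrinker));
	 */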
-/**
- * i915_gem_shrinker_unregister - Unregisters the i915 shrinker
- * @i915: i915 device
- *
- * This function unregisters the i915 shrinker and OOM handler.
- */
-void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
+void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
{
WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
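The WARN_ON() wrappers flag teardown failures loudly; both unregister calls return 0 on success. The notifier callbacks themselves (the tail of one, return NOTIFY_DONE, opens this excerpt) follow the standard notifier_block shape. A generic sketch of an OOM notifier, with hypothetical example_* names:

	#include <linux/notifier.h>
	#include <linux/oom.h>

	/* Generic OOM notifier sketch: try to reclaim, then report how
	 * many pages were freed through the unsigned long the OOM killer
	 * passes in as ptr. */
	static int example_oom(struct notifier_block *nb,
			       unsigned long event, void *ptr)
	{
		unsigned long *freed = ptr;

		*freed += 0;		/* pages reclaimed in this sketch */
		return NOTIFY_DONE;
	}

	static struct notifier_block example_oom_notifier = {
		.notifier_call = example_oom,
	};

	/* Paired with: WARN_ON(register_oom_notifier(&example_oom_notifier)); */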
static void disable_retire_worker(struct drm_i915_private *i915)
{
- i915_gem_shrinker_unregister(i915);
+ i915_gem_driver_unregister__shrinker(i915);
intel_gt_pm_get(&i915->gt);
igt_flush_test(i915, I915_WAIT_LOCKED);
mutex_unlock(&i915->drm.struct_mutex);
- i915_gem_shrinker_register(i915);
+ i915_gem_driver_register__shrinker(i915);
}
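The selftest helpers above show the intended usage of the renamed entry points: the shrinker is pulled while a test manipulates object state, then reinstalled afterwards. A hypothetical wrapper making that bracketing explicit, using only functions introduced by this patch:

	/* Hypothetical sketch of the selftest bracketing pattern:
	 * quiesce background reclaim, run the test body, restore. */
	static int run_without_shrinker(struct drm_i915_private *i915,
					int (*body)(struct drm_i915_private *))
	{
		int err;

		i915_gem_driver_unregister__shrinker(i915); /* no reclaim races */
		err = body(i915);
		i915_gem_driver_register__shrinker(i915);   /* restore reclaim */

		return err;
	}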
static void mmap_offset_lock(struct drm_i915_private *i915)
if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
__intel_gt_reset(gt, ALL_ENGINES);
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt->i915, id)
engine->submit_request = nop_submit_request;
- engine->schedule = NULL;
- }
- gt->i915->caps.scheduler = 0;
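nop_submit_request itself is not shown in this excerpt; in kernels of this vintage the hook installed above completes each request immediately with -EIO so that waiters on a dead GPU drain. A hedged sketch of that shape (field names such as engine->active.lock vary between releases, and the _sketch suffix marks it as illustrative):

	/* Hedged sketch of a wedged-mode submit hook: fail the fence and
	 * mark the request complete so nothing blocks on a dead GPU. */
	static void nop_submit_request_sketch(struct i915_request *rq)
	{
		struct intel_engine_cs *engine = rq->engine;
		unsigned long flags;

		dma_fence_set_error(&rq->fence, -EIO);	/* wedged: report error */

		spin_lock_irqsave(&engine->active.lock, flags);
		__i915_request_submit(rq);		/* bookkeeping only */
		i915_request_mark_complete(rq);		/* release waiters */
		spin_unlock_irqrestore(&engine->active.lock, flags);
	}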
/*
* Make sure no request can slip through without getting completed by
{
struct drm_device *dev = &dev_priv->drm;
- i915_gem_shrinker_register(dev_priv);
+ i915_gem_driver_register(dev_priv);
i915_pmu_register(dev_priv);
/*
i915_teardown_sysfs(dev_priv);
drm_dev_unplug(&dev_priv->drm);
- i915_gem_shrinker_unregister(dev_priv);
+ i915_gem_driver_unregister(dev_priv);
}
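For orientation, the call sites keep bring-up and teardown symmetric: whatever i915_driver_register() installs, the unregister path pulls down in roughly reverse order, with the GEM pieces now behind a single entry point on each side. A hypothetical sketch of that shape, abbreviated from the hunks above:

	/* Hypothetical sketch: teardown mirrors bring-up in reverse. */
	static void driver_register_sketch(struct drm_i915_private *i915)
	{
		i915_gem_driver_register(i915);		/* GEM: shrinker etc. */
		i915_pmu_register(i915);		/* then perf counters */
	}

	static void driver_unregister_sketch(struct drm_i915_private *i915)
	{
		drm_dev_unplug(&i915->drm);		/* stop new userspace */
		i915_gem_driver_unregister(i915);	/* then GEM teardown */
	}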
static void i915_welcome_messages(struct drm_i915_private *dev_priv)
void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
+void i915_gem_driver_register(struct drm_i915_private *i915);
+void i915_gem_driver_unregister(struct drm_i915_private *i915);
void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
void i915_gem_driver_release(struct drm_i915_private *dev_priv);
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
#define I915_SHRINK_WRITEBACK BIT(4)
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
-void i915_gem_shrinker_register(struct drm_i915_private *i915);
-void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
+void i915_gem_driver_register__shrinker(struct drm_i915_private *i915);
+void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915);
void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
struct mutex *mutex);
intel_mocs_init(gt);
- intel_engines_set_scheduler_caps(i915);
-
out:
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
return ret;
return ret;
}
+void i915_gem_driver_register(struct drm_i915_private *i915)
+{
+ i915_gem_driver_register__shrinker(i915);
+ intel_engines_set_scheduler_caps(i915);
+}
+
+void i915_gem_driver_unregister(struct drm_i915_private *i915)
+{
+ i915_gem_driver_unregister__shrinker(i915);
+}
+
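With caps.scheduler no longer zeroed on wedging (see the nop_submit_request hunk earlier), callers probe the engine->schedule hook directly, exactly as the request-submission hunk at the end of this patch does. A minimal sketch of that test (maybe_schedule is a hypothetical helper, not from this patch):

	/* Sketch: a non-NULL engine->schedule hook *is* the capability
	 * check; no separate flag to keep in sync with wedging. */
	static void maybe_schedule(struct i915_request *rq,
				   const struct i915_sched_attr *attr)
	{
		if (rq->engine->schedule)	/* hook present => scheduler on */
			rq->engine->schedule(rq, attr);
	}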
void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
GEM_BUG_ON(dev_priv->gt.awake);
*/
local_bh_disable();
i915_sw_fence_commit(&rq->semaphore);
- rcu_read_lock(); /* RCU serialisation for set-wedged protection */
if (engine->schedule) {
struct i915_sched_attr attr = rq->gem_context->sched;
engine->schedule(rq, &attr);
}
- rcu_read_unlock();
i915_sw_fence_commit(&rq->submit);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
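Two things happen in this final hunk. The rcu_read_lock()/rcu_read_unlock() pair can go because set-wedged no longer clears engine->schedule (that assignment was removed in the nop_submit_request hunk above), so there is no concurrent NULLing left to serialise against. Separately, the surviving local_bh_disable()/local_bh_enable() bracket holds off softirq processing so the execlists tasklet runs only after both fences are committed. The idiom in isolation, as a generic sketch:

	#include <linux/bottom_half.h>

	/* Generic sketch of the BH bracket used above: queue several
	 * pieces of work with softirqs held off, then let any tasklet
	 * raised meanwhile run on local_bh_enable(), seeing all of it. */
	static void commit_both(struct i915_sw_fence *a, struct i915_sw_fence *b)
	{
		local_bh_disable();	/* no tasklets until we finish */
		i915_sw_fence_commit(a);
		i915_sw_fence_commit(b);
		local_bh_enable();	/* kick any tasklet raised above */
	}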