From 8ec52ec8dc6827ea7aed524ed5af7aedcfbc553b Mon Sep 17 00:00:00 2001
From: Michał Winiarski
Date: Wed, 13 Dec 2017 23:13:51 +0100
Subject: [PATCH] drm/i915/guc: Extract clients allocation to submission_init
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

We can now move the clients allocation to the submission_init path,
rather than keeping the conditional allocation inside submission_enable,
which is called on every reset.

Signed-off-by: Michał Winiarski
Cc: Chris Wilson
Cc: Joonas Lahtinen
Cc: Michal Wajdeczko
Reviewed-by: Michel Thierry
Signed-off-by: Chris Wilson
Link: https://patchwork.freedesktop.org/patch/msgid/20171213221352.7173-6-michal.winiarski@intel.com
---
 drivers/gpu/drm/i915/intel_guc_submission.c | 33 +++++++--------------
 1 file changed, 11 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index c74e78b6ba412..488110602e7eb 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -1149,6 +1149,10 @@ int intel_guc_submission_init(struct intel_guc *guc)
 		goto err_log;
 	GEM_BUG_ON(!guc->ads_vma);
 
+	ret = guc_clients_create(guc);
+	if (ret)
+		return ret;
+
 	for_each_engine(engine, dev_priv, id) {
 		guc->preempt_work[id].engine = engine;
 		INIT_WORK(&guc->preempt_work[id].work, inject_preempt_context);
@@ -1172,6 +1176,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
 	for_each_engine(engine, dev_priv, id)
 		cancel_work_sync(&guc->preempt_work[id].work);
 
+	guc_clients_destroy(guc);
 	guc_ads_destroy(guc);
 	intel_guc_log_destroy(guc);
 	guc_stage_desc_pool_destroy(guc);
@@ -1277,28 +1282,18 @@ int intel_guc_submission_enable(struct intel_guc *guc)
 		     sizeof(struct guc_wq_item) *
 		     I915_NUM_ENGINES > GUC_WQ_SIZE);
 
-	/*
-	 * We're being called on both module initialization and on reset,
-	 * until this flow is changed, we're using regular client presence to
-	 * determine which case are we in, and whether we should allocate new
-	 * clients or just reset their workqueues.
-	 */
-	if (!guc->execbuf_client) {
-		err = guc_clients_create(guc);
-		if (err)
-			return err;
-	} else {
-		guc_reset_wq(guc->execbuf_client);
-		guc_reset_wq(guc->preempt_client);
-	}
+	GEM_BUG_ON(!guc->execbuf_client);
+
+	guc_reset_wq(guc->execbuf_client);
+	guc_reset_wq(guc->preempt_client);
 
 	err = intel_guc_sample_forcewake(guc);
 	if (err)
-		goto err_free_clients;
+		return err;
 
 	err = guc_clients_doorbell_init(guc);
 	if (err)
-		goto err_free_clients;
+		return err;
 
 	/* Take over from manual control of ELSP (execlists) */
 	guc_interrupts_capture(dev_priv);
@@ -1315,10 +1310,6 @@ int intel_guc_submission_enable(struct intel_guc *guc)
 	}
 
 	return 0;
-
-err_free_clients:
-	guc_clients_destroy(guc);
-	return err;
 }
 
 void intel_guc_submission_disable(struct intel_guc *guc)
@@ -1332,8 +1323,6 @@ void intel_guc_submission_disable(struct intel_guc *guc)
 
 	/* Revert back to manual ELSP submission */
 	intel_engines_reset_default_submission(dev_priv);
-
-	guc_clients_destroy(guc);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-- 
2.39.5