From d97aeadec6f66b54ea6a23eb2aedd09c76a62b40 Mon Sep 17 00:00:00 2001
From: Tvrtko Ursulin
Date: Mon, 3 Dec 2018 13:33:19 +0000
Subject: [PATCH] drm/i915: Record GT workarounds in a list

To enable later verification of GT workaround state at various stages
of the driver lifetime, we record the workarounds applicable to each
platform in a list, from which they are also applied.

The added data structure is a simple array of register, mask and value
items, which is allocated on demand as workarounds are added to the
list.

This is a temporary implementation which later in the series gets fused
with the existing per-context workaround list handling. It is separated
at this stage since the following patch fixes a bug which needs to be as
easy to backport as possible.

Also, since the following patch will add a new class of workarounds
(per engine) which can be applied from interrupt context, we make
provision straight away for a safe read-modify-write cycle.

v2:
 * Change dev_priv to i915 along the init path. (Chris Wilson)
 * API rename. (Chris Wilson)

v3:
 * Remove explicit list size tracking in favour of growing the
   allocation in power of two chunks. (Chris Wilson)

v4:
 Chris Wilson:
 * Change wa_list_finish to early return.
 * Copy workarounds using the compiler for static checking.
 * Do not bother zeroing unused entries.
 * Re-order struct i915_wa_list.

v5:
 * kmalloc_array.
 * Whitespace cleanup.

Signed-off-by: Tvrtko Ursulin
Reviewed-by: Chris Wilson
Link: https://patchwork.freedesktop.org/patch/msgid/20181203133319.10174-1-tvrtko.ursulin@linux.intel.com
---
 drivers/gpu/drm/i915/i915_drv.c          |   1 +
 drivers/gpu/drm/i915/i915_drv.h          |   2 +
 drivers/gpu/drm/i915/i915_gem.c          |   4 +-
 drivers/gpu/drm/i915/intel_workarounds.c | 492 +++++++++++++++--------
 drivers/gpu/drm/i915/intel_workarounds.h |  23 +-
 5 files changed, 354 insertions(+), 168 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c2a3ba7e09e0f..6f497cf31ffc1 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1467,6 +1467,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
 	intel_uncore_sanitize(dev_priv);
 
+	intel_gt_init_workarounds(dev_priv);
 	i915_gem_load_init_fences(dev_priv);
 
 	/* On the 945G/GM, the chipset reports the MSI capability on the
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 12b8476b09a26..d725390d5a481 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -69,6 +69,7 @@
 #include "intel_ringbuffer.h"
 #include "intel_uncore.h"
 #include "intel_wopcm.h"
+#include "intel_workarounds.h"
 #include "intel_uc.h"
 
 #include "i915_gem.h"
@@ -1653,6 +1654,7 @@ struct drm_i915_private {
 	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
 	struct i915_workarounds workarounds;
+	struct i915_wa_list gt_wa_list;
 
 	struct i915_frontbuffer_tracking fb_tracking;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 834240a9b262a..ad5a115e47b65 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5300,7 +5300,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 	I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
 		   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 
-	intel_gt_workarounds_apply(dev_priv);
+	intel_gt_apply_workarounds(dev_priv);
 
 	i915_gem_init_swizzling(dev_priv);
 
@@ -5672,6 +5672,8 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
 	i915_gem_contexts_fini(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
+	intel_wa_list_free(&dev_priv->gt_wa_list);
+
 	intel_cleanup_gt_powersave(dev_priv);
 
 	intel_uc_fini_misc(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index e5cd6c6c66c3d..c3256ab2c4112 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -48,6 +48,20 @@
  * - Public functions to init or apply the given workaround type.
  */
 
+static void wa_init_start(struct i915_wa_list *wal, const char *name)
+{
+	wal->name = name;
+}
+
+static void wa_init_finish(struct i915_wa_list *wal)
+{
+	if (!wal->count)
+		return;
+
+	DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n",
+			 wal->count, wal->name);
+}
+
 static void wa_add(struct drm_i915_private *i915,
 		   i915_reg_t reg, const u32 mask, const u32 val)
 {
@@ -575,160 +589,239 @@ int intel_ctx_workarounds_emit(struct i915_request *rq)
 	return 0;
 }
 
-static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wal_add(struct i915_wa_list *wal, const struct i915_wa *wa)
+{
+	const unsigned int grow = 1 << 4;
+
+	GEM_BUG_ON(!is_power_of_2(grow));
+
+	if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
+		struct i915_wa *list;
+
+		list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
+				     GFP_KERNEL);
+		if (!list) {
+			DRM_ERROR("No space for workaround init!\n");
+			return;
+		}
+
+		if (wal->list)
+			memcpy(list, wal->list, sizeof(*wa) * wal->count);
+
+		wal->list = list;
+	}
+
+	wal->list[wal->count++] = *wa;
+}
+
+static void
+wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+{
+	struct i915_wa wa = {
+		.reg = reg,
+		.mask = val,
+		.val = _MASKED_BIT_ENABLE(val)
+	};
+
+	wal_add(wal, &wa);
+}
+
+static void
+wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
+		   u32 val)
 {
+	struct i915_wa wa = {
+		.reg = reg,
+		.mask = mask,
+		.val = val
+	};
+
+	wal_add(wal, &wa);
+}
+
+static void
+wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+{
+	wa_write_masked_or(wal, reg, ~0, val);
 }
 
-static void chv_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 {
+	wa_write_masked_or(wal, reg, val, val);
 }
 
-static void gen9_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
 {
+	struct i915_wa_list *wal = &i915->gt_wa_list;
+
 	/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
-	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
-		   _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+	wa_masked_en(wal,
+		     GEN9_CSFE_CHICKEN1_RCS,
+		     GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
+
 	/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
-	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
-		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+	wa_write_or(wal,
+		    BDW_SCRATCH1,
+		    GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
 
 	/* WaDisableKillLogic:bxt,skl,kbl */
-	if (!IS_COFFEELAKE(dev_priv))
-		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-			   ECOCHK_DIS_TLB);
+	if (!IS_COFFEELAKE(i915))
+		wa_write_or(wal,
+			    GAM_ECOCHK,
+			    ECOCHK_DIS_TLB);
 
-	if (HAS_LLC(dev_priv)) {
+	if (HAS_LLC(i915)) {
 		/*
 		 * WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
 		 *
 		 * Must match Display Engine. See
 		 * WaCompressedResourceDisplayNewHashMode.
 		 */
-		I915_WRITE(MMCD_MISC_CTRL,
-			   I915_READ(MMCD_MISC_CTRL) |
-			   MMCD_PCLA |
-			   MMCD_HOTSPOT_EN);
+		wa_write_or(wal,
+			    MMCD_MISC_CTRL,
+			    MMCD_PCLA | MMCD_HOTSPOT_EN);
 	}
 
 	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
-	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-		   BDW_DISABLE_HDC_INVALIDATION);
+	wa_write_or(wal,
+		    GAM_ECOCHK,
+		    BDW_DISABLE_HDC_INVALIDATION);
 
 	/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
-	if (IS_GEN9_LP(dev_priv)) {
-		u32 val = I915_READ(GEN8_L3SQCREG1);
-
-		val &= ~L3_PRIO_CREDITS_MASK;
-		val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
-		I915_WRITE(GEN8_L3SQCREG1, val);
-	}
+	if (IS_GEN9_LP(i915))
+		wa_write_masked_or(wal,
+				   GEN8_L3SQCREG1,
+				   L3_PRIO_CREDITS_MASK,
+				   L3_GENERAL_PRIO_CREDITS(62) |
+				   L3_HIGH_PRIO_CREDITS(2));
 
 	/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
-	I915_WRITE(GEN8_L3SQCREG4,
-		   I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES);
+	wa_write_or(wal,
+		    GEN8_L3SQCREG4,
+		    GEN8_LQSC_FLUSH_COHERENT_LINES);
 
 	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
-	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
-		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+	wa_masked_en(wal,
+		     GEN7_FF_SLICE_CS_CHICKEN1,
+		     GEN9_FFSC_PERCTX_PREEMPT_CTRL);
 }
 
-static void skl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void skl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	gen9_gt_workarounds_apply(dev_priv);
+	struct i915_wa_list *wal = &i915->gt_wa_list;
+
+	gen9_gt_workarounds_init(i915);
 
 	/* WaEnableGapsTsvCreditFix:skl */
-	I915_WRITE(GEN8_GARBCNTL,
-		   I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+	wa_write_or(wal,
+		    GEN8_GARBCNTL,
+		    GEN9_GAPS_TSV_CREDIT_DISABLE);
 
 	/* WaDisableGafsUnitClkGating:skl */
-	I915_WRITE(GEN7_UCGCTL4,
-		   I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+	wa_write_or(wal,
+		    GEN7_UCGCTL4,
+		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaInPlaceDecompressionHang:skl */
-	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
-		I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-			   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+	if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
+		wa_write_or(wal,
+			    GEN9_GAMT_ECO_REG_RW_IA,
+			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void bxt_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	gen9_gt_workarounds_apply(dev_priv);
+	struct i915_wa_list *wal = &i915->gt_wa_list;
+
+	gen9_gt_workarounds_init(i915);
 
 	/* WaDisablePooledEuLoadBalancingFix:bxt */
-	I915_WRITE(FF_SLICE_CS_CHICKEN2,
-		   _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
+	wa_masked_en(wal,
+		     FF_SLICE_CS_CHICKEN2,
+		     GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
 
 	/* WaInPlaceDecompressionHang:bxt */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+	wa_write_or(wal,
+		    GEN9_GAMT_ECO_REG_RW_IA,
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	gen9_gt_workarounds_apply(dev_priv);
+	struct i915_wa_list *wal = &i915->gt_wa_list;
+
+	gen9_gt_workarounds_init(i915);
 
 	/* WaEnableGapsTsvCreditFix:kbl */
-	I915_WRITE(GEN8_GARBCNTL,
-		   I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+	wa_write_or(wal,
+		    GEN8_GARBCNTL,
+		    GEN9_GAPS_TSV_CREDIT_DISABLE);
 
 	/* WaDisableDynamicCreditSharing:kbl */
-	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
-		I915_WRITE(GAMT_CHKN_BIT_REG,
-			   I915_READ(GAMT_CHKN_BIT_REG) |
-			   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+	if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
+		wa_write_or(wal,
+			    GAMT_CHKN_BIT_REG,
+			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
 
 	/* WaDisableGafsUnitClkGating:kbl */
-	I915_WRITE(GEN7_UCGCTL4,
-		   I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+	wa_write_or(wal,
+		    GEN7_UCGCTL4,
+		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaInPlaceDecompressionHang:kbl */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+	wa_write_or(wal,
+		    GEN9_GAMT_ECO_REG_RW_IA,
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 
 	/* WaKBLVECSSemaphoreWaitPoll:kbl */
-	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) {
+	if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
 		struct intel_engine_cs *engine;
 		unsigned int tmp;
 
-		for_each_engine(engine, dev_priv, tmp) {
+		for_each_engine(engine, i915, tmp) {
 			if (engine->id == RCS)
 				continue;
 
-			I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1);
+			wa_write(wal,
+				 RING_SEMA_WAIT_POLL(engine->mmio_base),
+				 1);
 		}
 	}
 }
 
-static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void glk_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	gen9_gt_workarounds_apply(dev_priv);
+	gen9_gt_workarounds_init(i915);
 }
 
-static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	gen9_gt_workarounds_apply(dev_priv);
+	struct i915_wa_list *wal = &i915->gt_wa_list;
+
+	gen9_gt_workarounds_init(i915);
 
 	/* WaEnableGapsTsvCreditFix:cfl */
-	I915_WRITE(GEN8_GARBCNTL,
-		   I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+	wa_write_or(wal,
+		    GEN8_GARBCNTL,
+		    GEN9_GAPS_TSV_CREDIT_DISABLE);
 
 	/* WaDisableGafsUnitClkGating:cfl */
-	I915_WRITE(GEN7_UCGCTL4,
-		   I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+	wa_write_or(wal,
+		    GEN7_UCGCTL4,
+		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaInPlaceDecompressionHang:cfl */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+	wa_write_or(wal,
+		    GEN9_GAMT_ECO_REG_RW_IA,
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
 static void wa_init_mcr(struct drm_i915_private *dev_priv)
 {
 	const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
-	u32 mcr;
+	struct i915_wa_list *wal = &dev_priv->gt_wa_list;
 	u32 mcr_slice_subslice_mask;
 
 	/*
@@ -765,8 +858,6 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
 		WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
 	}
 
-	mcr = I915_READ(GEN8_MCR_SELECTOR);
-
 	if (INTEL_GEN(dev_priv) >= 11)
 		mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
 					  GEN11_MCR_SUBSLICE_MASK;
@@ -784,156 +875,225 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
 	 * occasions, such as INSTDONE, where this value is dependent
 	 * on s/ss combo, the read should be done with read_subslice_reg.
 	 */
-	mcr &= ~mcr_slice_subslice_mask;
-	mcr |= intel_calculate_mcr_s_ss_select(dev_priv);
-	I915_WRITE(GEN8_MCR_SELECTOR, mcr);
+	wa_write_masked_or(wal,
+			   GEN8_MCR_SELECTOR,
+			   mcr_slice_subslice_mask,
+			   intel_calculate_mcr_s_ss_select(dev_priv));
 }
 
-static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	wa_init_mcr(dev_priv);
+	struct i915_wa_list *wal = &i915->gt_wa_list;
+
+	wa_init_mcr(i915);
 
 	/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
-	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
-		I915_WRITE(GAMT_CHKN_BIT_REG,
-			   I915_READ(GAMT_CHKN_BIT_REG) |
-			   GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
+	if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
+		wa_write_or(wal,
+			    GAMT_CHKN_BIT_REG,
+			    GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
 
 	/* WaInPlaceDecompressionHang:cnl */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+	wa_write_or(wal,
+		    GEN9_GAMT_ECO_REG_RW_IA,
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 
 	/* WaEnablePreemptionGranularityControlByUMD:cnl */
-	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
-		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+	wa_masked_en(wal,
+		     GEN7_FF_SLICE_CS_CHICKEN1,
+		     GEN9_FFSC_PERCTX_PREEMPT_CTRL);
 }
 
-static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void icl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	wa_init_mcr(dev_priv);
+	struct i915_wa_list *wal = &i915->gt_wa_list;
+
+	wa_init_mcr(i915);
 
 	/* This is not an Wa. Enable for better image quality */
-	I915_WRITE(_3D_CHICKEN3,
-		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
+	wa_masked_en(wal,
+		     _3D_CHICKEN3,
+		     _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
 
 	/* WaInPlaceDecompressionHang:icl */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+	wa_write_or(wal,
+		    GEN9_GAMT_ECO_REG_RW_IA,
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 
 	/* WaPipelineFlushCoherentLines:icl */
-	I915_WRITE(GEN8_L3SQCREG4,
-		   I915_READ(GEN8_L3SQCREG4) |
-		   GEN8_LQSC_FLUSH_COHERENT_LINES);
+	wa_write_or(wal,
+		    GEN8_L3SQCREG4,
+		    GEN8_LQSC_FLUSH_COHERENT_LINES);
 
 	/* Wa_1405543622:icl
 	 * Formerly known as WaGAPZPriorityScheme
 	 */
-	I915_WRITE(GEN8_GARBCNTL,
-		   I915_READ(GEN8_GARBCNTL) |
-		   GEN11_ARBITRATION_PRIO_ORDER_MASK);
+	wa_write_or(wal,
+		    GEN8_GARBCNTL,
+		    GEN11_ARBITRATION_PRIO_ORDER_MASK);
 
 	/* Wa_1604223664:icl
 	 * Formerly known as WaL3BankAddressHashing
 	 */
-	I915_WRITE(GEN8_GARBCNTL,
-		   (I915_READ(GEN8_GARBCNTL) & ~GEN11_HASH_CTRL_EXCL_MASK) |
-		   GEN11_HASH_CTRL_EXCL_BIT0);
-	I915_WRITE(GEN11_GLBLINVL,
-		   (I915_READ(GEN11_GLBLINVL) & ~GEN11_BANK_HASH_ADDR_EXCL_MASK) |
-		   GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+	wa_write_masked_or(wal,
+			   GEN8_GARBCNTL,
+			   GEN11_HASH_CTRL_EXCL_MASK,
+			   GEN11_HASH_CTRL_EXCL_BIT0);
+	wa_write_masked_or(wal,
+			   GEN11_GLBLINVL,
+			   GEN11_BANK_HASH_ADDR_EXCL_MASK,
+			   GEN11_BANK_HASH_ADDR_EXCL_BIT0);
 
 	/* WaModifyGamTlbPartitioning:icl */
-	I915_WRITE(GEN11_GACB_PERF_CTRL,
-		   (I915_READ(GEN11_GACB_PERF_CTRL) & ~GEN11_HASH_CTRL_MASK) |
-		   GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
+	wa_write_masked_or(wal,
+			   GEN11_GACB_PERF_CTRL,
+			   GEN11_HASH_CTRL_MASK,
+			   GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
 
 	/* Wa_1405733216:icl
 	 * Formerly known as WaDisableCleanEvicts
 	 */
-	I915_WRITE(GEN8_L3SQCREG4,
-		   I915_READ(GEN8_L3SQCREG4) |
-		   GEN11_LQSC_CLEAN_EVICT_DISABLE);
+	wa_write_or(wal,
+		    GEN8_L3SQCREG4,
+		    GEN11_LQSC_CLEAN_EVICT_DISABLE);
 
 	/* Wa_1405766107:icl
 	 * Formerly known as WaCL2SFHalfMaxAlloc
 	 */
-	I915_WRITE(GEN11_LSN_UNSLCVC,
-		   I915_READ(GEN11_LSN_UNSLCVC) |
-		   GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
-		   GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
+	wa_write_or(wal,
+		    GEN11_LSN_UNSLCVC,
+		    GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
+		    GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
 
 	/* Wa_220166154:icl
 	 * Formerly known as WaDisCtxReload
 	 */
-	I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
-		   I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
-		   GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
+	wa_write_or(wal,
+		    GEN8_GAMW_ECO_DEV_RW_IA,
+		    GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
 
 	/* Wa_1405779004:icl (pre-prod) */
-	if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
-		I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
-			   I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
-			   MSCUNIT_CLKGATE_DIS);
+	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
+		wa_write_or(wal,
+			    SLICE_UNIT_LEVEL_CLKGATE,
+			    MSCUNIT_CLKGATE_DIS);
 
 	/* Wa_1406680159:icl */
-	I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE,
-		   I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE) |
-		   GWUNIT_CLKGATE_DIS);
+	wa_write_or(wal,
+		    SUBSLICE_UNIT_LEVEL_CLKGATE,
+		    GWUNIT_CLKGATE_DIS);
 
 	/* Wa_1406838659:icl (pre-prod) */
-	if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
-		I915_WRITE(INF_UNIT_LEVEL_CLKGATE,
-			   I915_READ(INF_UNIT_LEVEL_CLKGATE) |
-			   CGPSF_CLKGATE_DIS);
+	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+		wa_write_or(wal,
+			    INF_UNIT_LEVEL_CLKGATE,
+			    CGPSF_CLKGATE_DIS);
 
 	/* WaForwardProgressSoftReset:icl */
-	I915_WRITE(GEN10_SCRATCH_LNCF2,
-		   I915_READ(GEN10_SCRATCH_LNCF2) |
-		   PMFLUSHDONE_LNICRSDROP |
-		   PMFLUSH_GAPL3UNBLOCK |
-		   PMFLUSHDONE_LNEBLK);
+	wa_write_or(wal,
+		    GEN10_SCRATCH_LNCF2,
+		    PMFLUSHDONE_LNICRSDROP |
+		    PMFLUSH_GAPL3UNBLOCK |
+		    PMFLUSHDONE_LNEBLK);
 
 	/* Wa_1406463099:icl
 	 * Formerly known as WaGamTlbPendError
 	 */
-	I915_WRITE(GAMT_CHKN_BIT_REG,
-		   I915_READ(GAMT_CHKN_BIT_REG) |
-		   GAMT_CHKN_DISABLE_L3_COH_PIPE);
+	wa_write_or(wal,
+		    GAMT_CHKN_BIT_REG,
+		    GAMT_CHKN_DISABLE_L3_COH_PIPE);
 
 	/* Wa_1406609255:icl (pre-prod) */
-	if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
-		I915_WRITE(GEN7_SARCHKMD,
-			   I915_READ(GEN7_SARCHKMD) |
-			   GEN7_DISABLE_DEMAND_PREFETCH |
-			   GEN7_DISABLE_SAMPLER_PREFETCH);
+	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+		wa_write_or(wal,
+			    GEN7_SARCHKMD,
+			    GEN7_DISABLE_DEMAND_PREFETCH |
+			    GEN7_DISABLE_SAMPLER_PREFETCH);
 }
 
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+void intel_gt_init_workarounds(struct drm_i915_private *i915)
 {
-	if (INTEL_GEN(dev_priv) < 8)
+	struct i915_wa_list *wal = &i915->gt_wa_list;
+
+	wa_init_start(wal, "GT");
+
+	if (INTEL_GEN(i915) < 8)
 		return;
-	else if (IS_BROADWELL(dev_priv))
-		bdw_gt_workarounds_apply(dev_priv);
-	else if (IS_CHERRYVIEW(dev_priv))
-		chv_gt_workarounds_apply(dev_priv);
-	else if (IS_SKYLAKE(dev_priv))
-		skl_gt_workarounds_apply(dev_priv);
-	else if (IS_BROXTON(dev_priv))
-		bxt_gt_workarounds_apply(dev_priv);
-	else if (IS_KABYLAKE(dev_priv))
-		kbl_gt_workarounds_apply(dev_priv);
-	else if (IS_GEMINILAKE(dev_priv))
-		glk_gt_workarounds_apply(dev_priv);
-	else if (IS_COFFEELAKE(dev_priv))
-		cfl_gt_workarounds_apply(dev_priv);
-	else if (IS_CANNONLAKE(dev_priv))
-		cnl_gt_workarounds_apply(dev_priv);
-	else if (IS_ICELAKE(dev_priv))
-		icl_gt_workarounds_apply(dev_priv);
+	else if (IS_BROADWELL(i915))
+		return;
+	else if (IS_CHERRYVIEW(i915))
+		return;
+	else if (IS_SKYLAKE(i915))
+		skl_gt_workarounds_init(i915);
+	else if (IS_BROXTON(i915))
+		bxt_gt_workarounds_init(i915);
+	else if (IS_KABYLAKE(i915))
+		kbl_gt_workarounds_init(i915);
+	else if (IS_GEMINILAKE(i915))
+		glk_gt_workarounds_init(i915);
+	else if (IS_COFFEELAKE(i915))
+		cfl_gt_workarounds_init(i915);
+	else if (IS_CANNONLAKE(i915))
+		cnl_gt_workarounds_init(i915);
+	else if (IS_ICELAKE(i915))
+		icl_gt_workarounds_init(i915);
 	else
-		MISSING_CASE(INTEL_GEN(dev_priv));
+		MISSING_CASE(INTEL_GEN(i915));
+
+	wa_init_finish(wal);
+}
+
+static enum forcewake_domains
+wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
+		   const struct i915_wa_list *wal)
+{
+	enum forcewake_domains fw = 0;
+	struct i915_wa *wa;
+	unsigned int i;
+
+	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+		fw |= intel_uncore_forcewake_for_reg(dev_priv,
+						     wa->reg,
+						     FW_REG_READ |
+						     FW_REG_WRITE);
+
+	return fw;
+}
+
+static void
+wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
+{
+	enum forcewake_domains fw;
+	unsigned long flags;
+	struct i915_wa *wa;
+	unsigned int i;
+
+	if (!wal->count)
+		return;
+
+	fw = wal_get_fw_for_rmw(dev_priv, wal);
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
+	intel_uncore_forcewake_get__locked(dev_priv, fw);
+
+	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+		u32 val = I915_READ_FW(wa->reg);
+
+		val &= ~wa->mask;
+		val |= wa->val;
+
+		I915_WRITE_FW(wa->reg, val);
+	}
+
+	intel_uncore_forcewake_put__locked(dev_priv, fw);
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+
+	DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
+}
+
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
+{
+	wa_list_apply(dev_priv, &dev_priv->gt_wa_list);
 }
 
 struct whitelist {
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
index b11d0623e6268..263106600fdc0 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/intel_workarounds.h
@@ -7,10 +7,31 @@
 #ifndef _I915_WORKAROUNDS_H_
 #define _I915_WORKAROUNDS_H_
 
+#include <linux/slab.h>
+
+struct i915_wa {
+	i915_reg_t reg;
+	u32 mask;
+	u32 val;
+};
+
+struct i915_wa_list {
+	const char *name;
+	struct i915_wa *list;
+	unsigned int count;
+};
+
+static inline void intel_wa_list_free(struct i915_wa_list *wal)
+{
+	kfree(wal->list);
+	memset(wal, 0, sizeof(*wal));
+}
+
 int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv);
 int intel_ctx_workarounds_emit(struct i915_request *rq);
 
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv);
+void intel_gt_init_workarounds(struct drm_i915_private *dev_priv);
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv);
 
 void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
 
-- 
2.39.5