This reverts commit a546b60b512f ("drm/i915: Expand subslice mask"),
which kills ICL due to GEM_BUG_ON() sanity checks before CI even gets a
chance to do anything.
The commit exposes an issue in commit 509e7c23c4ab ("drm/i915/cnl:
Implement WaProgramMgsrForCorrectSliceSpecificMmioReads"), which will
also need to be addressed.
There's a proposed fix [1], but considering the apparent uncertainty
around the fix as well as the size of the regressing commit (in this
context, the one that actually brings down ICL), this warrants a revert
to get ICL working, and gives us time to get all of this right without
rushing. Even if this means shooting the messenger.
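
A note on the assert quoted in the log below: the kernel's fls() is
1-based (fls(0x1) == 1, fls(0) == 0), so feeding its result directly
into a slice-indexed lookup that asserts slice < max_slices will fire
on a single-slice part such as ICL. Whether that is the exact failure
mode here is for the proposed fix [1] to settle; the sketch below is
only an illustration with made-up names (demo_fls, demo_get_subslices),
not code from the i915 tree.

/*
 * Standalone illustration, not i915 code: a 1-based fls() result used
 * directly as a slice index trips a "slice >= max_slices" style assert
 * on a single-slice configuration (slice_mask == 0x1, max_slices == 1).
 */
#include <assert.h>
#include <stdio.h>

/* Mirrors the kernel's fls() semantics: 1-based index of the last set bit, 0 if none. */
static int demo_fls(unsigned int x)
{
	int pos = 0;

	while (x) {
		pos++;
		x >>= 1;
	}

	return pos;
}

static unsigned int demo_get_subslices(const unsigned char *subslice_mask,
				       unsigned int max_slices,
				       unsigned int slice)
{
	assert(slice < max_slices);	/* the GEM_BUG_ON() analogue */
	return subslice_mask[slice];
}

int main(void)
{
	const unsigned char subslice_mask[1] = { 0xff };
	unsigned int max_slices = 1;		/* single-slice part, e.g. ICL */
	unsigned int slice = demo_fls(0x1);	/* == 1, already out of range */

	printf("fls(slice_mask) = %u\n", slice);
	demo_get_subslices(subslice_mask, max_slices, slice);	/* asserts */
	return 0;
}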
<3>[ 9.426327] intel_sseu_get_subslices:46 GEM_BUG_ON(slice >= sseu->max_slices)
<4>[ 9.426355] ------------[ cut here ]------------
<2>[ 9.426357] kernel BUG at drivers/gpu/drm/i915/gt/intel_sseu.c:46!
<4>[ 9.426371] invalid opcode: 0000 [#1] PREEMPT SMP NOPTI
<4>[ 9.426377] CPU: 1 PID: 364 Comm: systemd-udevd Not tainted 5.2.0-rc2-CI-CI_DRM_6159+ #1
<4>[ 9.426385] Hardware name: Intel Corporation Ice Lake Client Platform/IceLake U DDR4 SODIMM PD RVP TLC, BIOS ICLSFWR1.R00.3183.A00.1905020411 05/02/2019
<4>[ 9.426444] RIP: 0010:intel_sseu_get_subslices+0x8a/0xe0 [i915]
<4>[ 9.426452] Code: d5 76 b7 e0 48 8b 35 9d 24 21 00 49 c7 c0 07 f0 72 a0 b9 2e 00 00 00 48 c7 c2 00 8e 6d a0 48 c7 c7 a5 14 5b a0 e8 36 3c be e0 <0f> 0b 48 c7 c1 80 d5 6f a0 ba 30 00 00 00 48 c7 c6 00 8e 6d a0 48
<4>[ 9.426468] RSP: 0018:ffffc9000037b9c8 EFLAGS: 00010282
<4>[ 9.426475] RAX: 000000000000000f RBX: 0000000000000000 RCX: 0000000000000000
<4>[ 9.426482] RDX: 0000000000000001 RSI: 0000000000000008 RDI: ffff88849e346f98
<4>[ 9.426490] RBP: ffff88848a200000 R08: 0000000000000004 R09: ffff88849d50b000
<4>[ 9.426497] R10: 0000000000000000 R11: ffff88849e346f98 R12: ffff88848a209e78
<4>[ 9.426505] R13: 0000000003000000 R14: ffff88848a20b1a8 R15: 0000000000000000
<4>[ 9.426513] FS: 00007f73d5ae8680(0000) GS:ffff88849fc80000(0000) knlGS:0000000000000000
<4>[ 9.426521] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
<4>[ 9.426527] CR2: 0000561417b01260 CR3: 0000000494764003 CR4: 0000000000760ee0
<4>[ 9.426535] PKRU: 55555554
<4>[ 9.426538] Call Trace:
<4>[ 9.426585] wa_init_mcr+0xd5/0x110 [i915]
<4>[ 9.426597] ? lock_acquire+0xa6/0x1c0
<4>[ 9.426645] icl_gt_workarounds_init+0x21/0x1a0 [i915]
<4>[ 9.426694] ? i915_driver_load+0xfcf/0x18a0 [i915]
<4>[ 9.426739] gt_init_workarounds+0x14c/0x230 [i915]
<4>[ 9.426748] ? _raw_spin_unlock_irq+0x24/0x50
<4>[ 9.426789] intel_gt_init_workarounds+0x1b/0x30 [i915]
<4>[ 9.426835] i915_driver_load+0xfd7/0x18a0 [i915]
<4>[ 9.426843] ? lock_acquire+0xa6/0x1c0
<4>[ 9.426850] ? __pm_runtime_resume+0x4f/0x80
<4>[ 9.426857] ? _raw_spin_unlock_irqrestore+0x4c/0x60
<4>[ 9.426863] ? _raw_spin_unlock_irqrestore+0x4c/0x60
<4>[ 9.426870] ? lockdep_hardirqs_on+0xe3/0x1b0
<4>[ 9.426915] i915_pci_probe+0x29/0xa0 [i915]
<4>[ 9.426923] pci_device_probe+0x9e/0x120
<4>[ 9.426930] really_probe+0xea/0x3c0
<4>[ 9.426936] driver_probe_device+0x10b/0x120
<4>[ 9.426942] device_driver_attach+0x4a/0x50
<4>[ 9.426948] __driver_attach+0x97/0x130
<4>[ 9.426954] ? device_driver_attach+0x50/0x50
<4>[ 9.426960] bus_for_each_dev+0x74/0xc0
<4>[ 9.426966] bus_add_driver+0x13f/0x210
<4>[ 9.426971] ? 0xffffffffa083b000
<4>[ 9.426976] driver_register+0x56/0xe0
<4>[ 9.426982] ? 0xffffffffa083b000
<4>[ 9.426987] do_one_initcall+0x58/0x300
<4>[ 9.426994] ? do_init_module+0x1d/0x1f6
<4>[ 9.427001] ? rcu_read_lock_sched_held+0x6f/0x80
<4>[ 9.427007] ? kmem_cache_alloc_trace+0x261/0x290
<4>[ 9.427014] do_init_module+0x56/0x1f6
<4>[ 9.427020] load_module+0x24d1/0x2990
<4>[ 9.427032] ? __se_sys_finit_module+0xd3/0xf0
<4>[ 9.427037] __se_sys_finit_module+0xd3/0xf0
<4>[ 9.427047] do_syscall_64+0x55/0x1c0
<4>[ 9.427053] entry_SYSCALL_64_after_hwframe+0x49/0xbe
<4>[ 9.427059] RIP: 0033:0x7f73d5609839
<4>[ 9.427064] Code: 00 f3 c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 1f f6 2c 00 f7 d8 64 89 01 48
<4>[ 9.427082] RSP: 002b:00007ffdf34477b8 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
<4>[ 9.427091] RAX: ffffffffffffffda RBX: 00005559fd5d7b40 RCX: 00007f73d5609839
<4>[ 9.427099] RDX: 0000000000000000 RSI: 00007f73d52e8145 RDI: 000000000000000f
<4>[ 9.427106] RBP: 00007f73d52e8145 R08: 0000000000000000 R09: 00007ffdf34478d0
<4>[ 9.427114] R10: 000000000000000f R11: 0000000000000246 R12: 0000000000000000
<4>[ 9.427121] R13: 00005559fd5c90f0 R14: 0000000000020000 R15: 00005559fd5d7b40
<4>[ 9.427131] Modules linked in: i915(+) mei_hdcp x86_pkg_temp_thermal coretemp snd_hda_intel crct10dif_pclmul crc32_pclmul snd_hda_codec snd_hwdep e1000e snd_hda_core ghash_clmulni_intel ptp snd_pcm cdc_ether usbnet mii pps_core mei_me mei prime_numbers btusb btrtl btbcm btintel bluetooth ecdh_generic ecc
<4>[ 9.427254] ---[ end trace af3eeb543bd66e66 ]---
[1] http://patchwork.freedesktop.org/patch/msgid/20190528200655.11605-1-chris@chris-wilson.co.uk
References: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6159/fi-icl-u2/pstore0-1517155098_Oops_1.log
References: 509e7c23c4ab ("drm/i915/cnl: Implement WaProgramMgsrForCorrectSliceSpecificMmioReads")
Fixes: a546b60b512f ("drm/i915: Expand subslice mask")
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Cc: Manasi Navare <manasi.d.navare@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Cc: Stuart Summers <stuart.summers@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Yunwei Zhang <yunwei.zhang@intel.com>
Acked-by: Daniel Vetter <daniel@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190529082150.31526-1-jani.nikula@intel.com
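
For readers skimming the diff below: the reverted layout packed the
subslice bits of all slices into one stride-based u8 array
(subslice_mask[slice * ss_stride + i]), while the restored layout keeps
a single u8 of subslice bits per slice (subslice_mask[slice]). The
following standalone sketch contrasts the two accessors under
hypothetical names (packed_get_subslices, flat_get_subslices); it is
not code from the i915 tree.

/*
 * Standalone illustration of the two subslice_mask layouts touched by
 * this revert; names are hypothetical, not taken from i915.
 */
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8

/* Reverted layout: ss_stride bytes of subslice bits per slice, packed LSB first. */
static uint32_t packed_get_subslices(const uint8_t *mask, int ss_stride, int slice)
{
	uint32_t out = 0;
	int i;

	for (i = 0; i < ss_stride; i++)
		out |= (uint32_t)mask[slice * ss_stride + i] << (i * BITS_PER_BYTE);

	return out;
}

/* Restored layout: exactly one byte of subslice bits per slice. */
static uint32_t flat_get_subslices(const uint8_t *mask, int slice)
{
	return mask[slice];
}

int main(void)
{
	/* One slice with 8 subslices enabled; ss_stride is 1 byte in both cases. */
	const uint8_t packed[1] = { 0xff };
	const uint8_t flat[1] = { 0xff };

	printf("packed: %#x flat: %#x\n",
	       packed_get_subslices(packed, 1, 0),
	       flat_get_subslices(flat, 0));
	return 0;
}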
}
}
-static inline u32
-intel_sseu_fls_subslice(const struct sseu_dev_info *sseu, u32 slice)
-{
- u32 subslice;
- int i;
-
- for (i = sseu->ss_stride - 1; i >= 0; i--) {
- subslice = fls(sseu->subslice_mask[slice * sseu->ss_stride +
- i]);
- if (subslice) {
- subslice += i * BITS_PER_BYTE;
- break;
- }
- }
-
- return subslice;
-}
-
u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
{
const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u32 mcr_s_ss_select;
u32 slice = fls(sseu->slice_mask);
- u32 subslice = intel_sseu_fls_subslice(sseu, slice);
+ u32 subslice = fls(sseu->subslice_mask[slice]);
if (IS_GEN(dev_priv, 10))
mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
struct intel_instdone *instdone)
{
struct drm_i915_private *dev_priv = engine->i915;
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
struct intel_uncore *uncore = engine->uncore;
u32 mmio_base = engine->mmio_base;
int slice;
instdone->slice_common =
intel_uncore_read(uncore, GEN7_SC_INSTDONE);
- for_each_instdone_slice_subslice(dev_priv, sseu, slice,
- subslice) {
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
instdone->sampler[slice][subslice] =
read_subslice_reg(dev_priv, slice, subslice,
GEN7_SAMPLER_INSTDONE);
return engine->flags & I915_ENGINE_IS_VIRTUAL;
}
-#define instdone_has_slice(dev_priv___, sseu___, slice___) \
- ((IS_GEN(dev_priv___, 7) ? 1 : ((sseu___)->slice_mask)) & \
- BIT(slice___))
-
-#define instdone_has_subslice(dev_priv__, sseu__, slice__, subslice__) \
- (IS_GEN(dev_priv__, 7) ? (1 & BIT(subslice__)) : \
- intel_sseu_has_subslice(sseu__, slice__, subslice__))
-
-#define for_each_instdone_slice_subslice(dev_priv_, sseu_, slice_, subslice_) \
- for ((slice_) = 0, (subslice_) = 0; (slice_) < I915_MAX_SLICES; \
- (subslice_) = ((subslice_) + 1) % I915_MAX_SUBSLICES, \
- (slice_) += ((subslice_) == 0)) \
- for_each_if((instdone_has_slice(dev_priv_, sseu_, slice_)) && \
- (instdone_has_subslice(dev_priv_, sseu_, slice_, \
- subslice_)))
+#define instdone_slice_mask(dev_priv__) \
+ (IS_GEN(dev_priv__, 7) ? \
+ 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
+
+#define instdone_subslice_mask(dev_priv__) \
+ (IS_GEN(dev_priv__, 7) ? \
+ 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])
+
+#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
+ for ((slice__) = 0, (subslice__) = 0; \
+ (slice__) < I915_MAX_SLICES; \
+ (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
+ (slice__) += ((subslice__) == 0)) \
+ for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
+ (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
#endif /* __INTEL_ENGINE_TYPES_H__ */
static bool subunits_stuck(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
struct intel_instdone instdone;
struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
bool stuck;
stuck &= instdone_unchanged(instdone.slice_common,
&accu_instdone->slice_common);
- for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice) {
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
&accu_instdone->sampler[slice][subslice]);
stuck &= instdone_unchanged(instdone.row[slice][subslice],
#include "intel_lrc_reg.h"
#include "intel_sseu.h"
-void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices,
- u8 max_subslices, u8 max_eus_per_subslice)
-{
- sseu->max_slices = max_slices;
- sseu->max_subslices = max_subslices;
- sseu->max_eus_per_subslice = max_eus_per_subslice;
-
- sseu->ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
- sseu->eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
-}
-
unsigned int
intel_sseu_subslice_total(const struct sseu_dev_info *sseu)
{
return total;
}
-void intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
- u8 *to_mask)
-{
- int offset = slice * sseu->ss_stride;
-
- memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
-}
-
-u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice)
-{
- int i, offset = slice * sseu->ss_stride;
- u32 mask;
-
- GEM_BUG_ON(slice >= sseu->max_slices);
-
- GEM_BUG_ON(sseu->ss_stride > sizeof(mask));
-
- for (i = 0; i < sseu->ss_stride; i++)
- mask |= (u32)sseu->subslice_mask[offset + i] <<
- i * BITS_PER_BYTE;
-
- return mask;
-}
-
-void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
- u32 ss_mask)
-{
- int i, offset = slice * sseu->ss_stride;
-
- for (i = 0; i < sseu->ss_stride; i++)
- sseu->subslice_mask[offset + i] =
- (ss_mask >> (BITS_PER_BYTE * i)) & 0xff;
-}
-
unsigned int
intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice)
{
- return hweight32(intel_sseu_get_subslices(sseu, slice));
+ return hweight8(sseu->subslice_mask[slice]);
}
u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
#define GEN_MAX_SLICES (6) /* CNL upper bound */
#define GEN_MAX_SUBSLICES (8) /* ICL upper bound */
#define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE)
-#define GEN_MAX_SUBSLICE_STRIDE GEN_SSEU_STRIDE(GEN_MAX_SUBSLICES)
struct sseu_dev_info {
u8 slice_mask;
- u8 subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE];
+ u8 subslice_mask[GEN_MAX_SLICES];
u16 eu_total;
u8 eu_per_subslice;
u8 min_eu_in_pool;
u8 max_subslices;
u8 max_eus_per_subslice;
- u8 ss_stride;
- u8 eu_stride;
-
/* We don't have more than 8 eus per subslice at the moment and as we
* store eus enabled using bits, no need to multiply by eus per
* subslice.
return value;
}
-static inline bool
-intel_sseu_has_subslice(const struct sseu_dev_info *sseu, int slice,
- int subslice)
-{
- u8 mask = sseu->subslice_mask[slice * sseu->ss_stride +
- subslice / BITS_PER_BYTE];
-
- return mask & BIT(subslice % BITS_PER_BYTE);
-}
-
-void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices,
- u8 max_subslices, u8 max_eus_per_subslice);
-
unsigned int
intel_sseu_subslice_total(const struct sseu_dev_info *sseu);
unsigned int
intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice);
-void intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
- u8 *to_mask);
-
-u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice);
-
-void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
- u32 ss_mask);
-
u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
const struct intel_sseu *req_sseu);
u32 slice = fls(sseu->slice_mask);
u32 fuse3 =
intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3);
- u32 ss_mask = intel_sseu_get_subslices(sseu, slice);
+ u8 ss_mask = sseu->subslice_mask[slice];
u8 enabled_mask = (ss_mask | ss_mask >>
GEN10_L3BANK_PAIR_COUNT) & GEN10_L3BANK_MASK;
struct seq_file *m,
struct intel_instdone *instdone)
{
- struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
int slice;
int subslice;
if (INTEL_GEN(dev_priv) <= 6)
return;
- for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice)
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice)
seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
slice, subslice, instdone->sampler[slice][subslice]);
- for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice)
+ for_each_instdone_slice_subslice(dev_priv, slice, subslice)
seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
slice, subslice, instdone->row[slice][subslice]);
}
continue;
sseu->slice_mask |= BIT(s);
- intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);
+ sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
for (ss = 0; ss < info->sseu.max_subslices; ss++) {
unsigned int eu_cnt;
sseu->slice_mask |= BIT(s);
if (IS_GEN9_BC(dev_priv))
- intel_sseu_copy_subslices(&info->sseu, s,
- sseu->subslice_mask);
+ sseu->subslice_mask[s] =
+ RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
for (ss = 0; ss < info->sseu.max_subslices; ss++) {
unsigned int eu_cnt;
- u8 ss_idx = s * info->sseu.ss_stride +
- ss / BITS_PER_BYTE;
if (IS_GEN9_LP(dev_priv)) {
if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
/* skip disabled subslice */
continue;
- sseu->subslice_mask[ss_idx] |=
- BIT(ss % BITS_PER_BYTE);
+ sseu->subslice_mask[s] |= BIT(ss);
}
eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
struct sseu_dev_info *sseu)
{
- struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
int s;
sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
if (sseu->slice_mask) {
- sseu->eu_per_subslice = info->sseu.eu_per_subslice;
- for (s = 0; s < fls(sseu->slice_mask); s++)
- intel_sseu_copy_subslices(&info->sseu, s,
- sseu->subslice_mask);
+ sseu->eu_per_subslice =
+ RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
+ for (s = 0; s < fls(sseu->slice_mask); s++) {
+ sseu->subslice_mask[s] =
+ RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
+ }
sseu->eu_total = sseu->eu_per_subslice *
intel_sseu_subslice_total(sseu);
/* subtract fused off EU(s) from enabled slice(s) */
for (s = 0; s < fls(sseu->slice_mask); s++) {
- u8 subslice_7eu = info->sseu.subslice_7eu[s];
+ u8 subslice_7eu =
+ RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
sseu->eu_total -= hweight8(subslice_7eu);
}
static int i915_sseu_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
struct sseu_dev_info sseu;
intel_wakeref_t wakeref;
return -ENODEV;
seq_puts(m, "SSEU Device Info\n");
- i915_print_sseu_info(m, true, &info->sseu);
+ i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
seq_puts(m, "SSEU Device Status\n");
memset(&sseu, 0, sizeof(sseu));
- intel_sseu_set_info(&sseu, info->sseu.max_slices,
- info->sseu.max_subslices,
- info->sseu.max_eus_per_subslice);
+ sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
+ sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
+ sseu.max_eus_per_subslice =
+ RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
with_intel_runtime_pm(dev_priv, wakeref) {
if (IS_CHERRYVIEW(dev_priv))
struct pci_dev *pdev = dev_priv->drm.pdev;
const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
drm_i915_getparam_t *param = data;
- int value = 0;
+ int value;
switch (param->param) {
case I915_PARAM_IRQ_ACTIVE:
return -ENODEV;
break;
case I915_PARAM_SUBSLICE_MASK:
- /* Only copy bits from the first slice */
- memcpy(&value, sseu->subslice_mask,
- min(sseu->ss_stride, (u8)sizeof(value)));
+ value = sseu->subslice_mask[0];
if (!value)
return -ENODEV;
break;
static void error_print_instdone(struct drm_i915_error_state_buf *m,
const struct drm_i915_error_engine *ee)
{
- struct sseu_dev_info *sseu = &RUNTIME_INFO(m->i915)->sseu;
int slice;
int subslice;
if (INTEL_GEN(m->i915) <= 6)
return;
- for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
+ for_each_instdone_slice_subslice(m->i915, slice, subslice)
err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
slice, subslice,
ee->instdone.sampler[slice][subslice]);
- for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
+ for_each_instdone_slice_subslice(m->i915, slice, subslice)
err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
slice, subslice,
ee->instdone.row[slice][subslice]);
const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
struct drm_i915_query_topology_info topo;
u32 slice_length, subslice_length, eu_length, total_length;
+ u8 subslice_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
+ u8 eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
int ret;
if (query_item->flags != 0)
BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));
slice_length = sizeof(sseu->slice_mask);
- subslice_length = sseu->max_slices * sseu->ss_stride;
- eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride;
+ subslice_length = sseu->max_slices * subslice_stride;
+ eu_length = sseu->max_slices * sseu->max_subslices * eu_stride;
total_length = sizeof(topo) + slice_length + subslice_length +
eu_length;
topo.max_eus_per_subslice = sseu->max_eus_per_subslice;
topo.subslice_offset = slice_length;
- topo.subslice_stride = sseu->ss_stride;
+ topo.subslice_stride = subslice_stride;
topo.eu_offset = slice_length + subslice_length;
- topo.eu_stride = sseu->eu_stride;
+ topo.eu_stride = eu_stride;
if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr),
&topo, sizeof(topo)))
hweight8(sseu->slice_mask), sseu->slice_mask);
drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
for (s = 0; s < sseu->max_slices; s++) {
- drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
+ drm_printf(p, "slice%d: %u subslices, mask=%04x\n",
s, intel_sseu_subslices_per_slice(sseu, s),
- intel_sseu_get_subslices(sseu, s));
+ sseu->subslice_mask[s]);
}
drm_printf(p, "EU total: %u\n", sseu->eu_total);
drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
int subslice)
{
- int slice_stride = sseu->max_subslices * sseu->eu_stride;
+ int subslice_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
+ int slice_stride = sseu->max_subslices * subslice_stride;
- return slice * slice_stride + subslice * sseu->eu_stride;
+ return slice * slice_stride + subslice * subslice_stride;
}
static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
int i, offset = sseu_eu_idx(sseu, slice, subslice);
u16 eu_mask = 0;
- for (i = 0; i < sseu->eu_stride; i++) {
+ for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
eu_mask |= ((u16)sseu->eu_mask[offset + i]) <<
(i * BITS_PER_BYTE);
}
{
int i, offset = sseu_eu_idx(sseu, slice, subslice);
- for (i = 0; i < sseu->eu_stride; i++) {
+ for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
sseu->eu_mask[offset + i] =
(eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
}
}
for (s = 0; s < sseu->max_slices; s++) {
- drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
+ drm_printf(p, "slice%d: %u subslice(s) (0x%hhx):\n",
s, intel_sseu_subslices_per_slice(sseu, s),
- intel_sseu_get_subslices(sseu, s));
+ sseu->subslice_mask[s]);
for (ss = 0; ss < sseu->max_subslices; ss++) {
u16 enabled_eus = sseu_get_eus(sseu, s, ss);
u8 eu_en;
int s;
- if (IS_ELKHARTLAKE(dev_priv))
- intel_sseu_set_info(sseu, 1, 4, 8);
- else
- intel_sseu_set_info(sseu, 1, 8, 8);
+ if (IS_ELKHARTLAKE(dev_priv)) {
+ sseu->max_slices = 1;
+ sseu->max_subslices = 4;
+ sseu->max_eus_per_subslice = 8;
+ } else {
+ sseu->max_slices = 1;
+ sseu->max_subslices = 8;
+ sseu->max_eus_per_subslice = 8;
+ }
s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
for (s = 0; s < sseu->max_slices; s++) {
if (s_en & BIT(s)) {
+ int ss_idx = sseu->max_subslices * s;
int ss;
sseu->slice_mask |= BIT(s);
-
- intel_sseu_set_subslices(sseu, s, ss_en_mask);
-
- for (ss = 0; ss < sseu->max_subslices; ss++)
- if (intel_sseu_has_subslice(sseu, s, ss))
+ sseu->subslice_mask[s] = (ss_en >> ss_idx) & ss_en_mask;
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ if (sseu->subslice_mask[s] & BIT(ss))
sseu_set_eus(sseu, s, ss, eu_en);
+ }
}
}
sseu->eu_per_subslice = hweight8(eu_en);
const int eu_mask = 0xff;
u32 subslice_mask, eu_en;
- intel_sseu_set_info(sseu, 6, 4, 8);
-
sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
GEN10_F2_S_ENA_SHIFT;
+ sseu->max_slices = 6;
+ sseu->max_subslices = 4;
+ sseu->max_eus_per_subslice = 8;
+
+ subslice_mask = (1 << 4) - 1;
+ subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
+ GEN10_F2_SS_DIS_SHIFT);
+
+ /*
+ * Slice0 can have up to 3 subslices, but there are only 2 in
+ * slice1/2.
+ */
+ sseu->subslice_mask[0] = subslice_mask;
+ for (s = 1; s < sseu->max_slices; s++)
+ sseu->subslice_mask[s] = subslice_mask & 0x3;
/* Slice0 */
eu_en = ~I915_READ(GEN8_EU_DISABLE0);
eu_en = ~I915_READ(GEN10_EU_DISABLE3);
sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);
- subslice_mask = (1 << 4) - 1;
- subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
- GEN10_F2_SS_DIS_SHIFT);
-
+ /* Do a second pass where we mark the subslices disabled if all their
+ * eus are off.
+ */
for (s = 0; s < sseu->max_slices; s++) {
for (ss = 0; ss < sseu->max_subslices; ss++) {
if (sseu_get_eus(sseu, s, ss) == 0)
- subslice_mask &= ~BIT(ss);
+ sseu->subslice_mask[s] &= ~BIT(ss);
}
-
- /*
- * Slice0 can have up to 3 subslices, but there are only 2 in
- * slice1/2.
- */
- intel_sseu_set_subslices(sseu, s, s == 0 ? subslice_mask :
- subslice_mask & 0x3);
}
sseu->eu_total = compute_eu_total(sseu);
{
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u32 fuse;
- u8 subslice_mask;
fuse = I915_READ(CHV_FUSE_GT);
sseu->slice_mask = BIT(0);
- intel_sseu_set_info(sseu, 1, 2, 8);
+ sseu->max_slices = 1;
+ sseu->max_subslices = 2;
+ sseu->max_eus_per_subslice = 8;
if (!(fuse & CHV_FGT_DISABLE_SS0)) {
u8 disabled_mask =
(((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);
- subslice_mask |= BIT(0);
+ sseu->subslice_mask[0] |= BIT(0);
sseu_set_eus(sseu, 0, 0, ~disabled_mask);
}
(((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);
- subslice_mask |= BIT(1);
+ sseu->subslice_mask[0] |= BIT(1);
sseu_set_eus(sseu, 0, 1, ~disabled_mask);
}
- intel_sseu_set_subslices(sseu, 0, subslice_mask);
-
sseu->eu_total = compute_eu_total(sseu);
/*
sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
/* BXT has a single slice and at most 3 subslices. */
- intel_sseu_set_info(sseu, IS_GEN9_LP(dev_priv) ? 1 : 3,
- IS_GEN9_LP(dev_priv) ? 3 : 4, 8);
+ sseu->max_slices = IS_GEN9_LP(dev_priv) ? 1 : 3;
+ sseu->max_subslices = IS_GEN9_LP(dev_priv) ? 3 : 4;
+ sseu->max_eus_per_subslice = 8;
/*
* The subslice disable field is global, i.e. it applies
/* skip disabled slice */
continue;
- intel_sseu_set_subslices(sseu, s, subslice_mask);
+ sseu->subslice_mask[s] = subslice_mask;
eu_disable = I915_READ(GEN9_EU_DISABLE(s));
for (ss = 0; ss < sseu->max_subslices; ss++) {
int eu_per_ss;
u8 eu_disabled_mask;
- if (!intel_sseu_has_subslice(sseu, s, ss))
+ if (!(sseu->subslice_mask[s] & BIT(ss)))
/* skip disabled subslice */
continue;
fuse2 = I915_READ(GEN8_FUSE2);
sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
- intel_sseu_set_info(sseu, 3, 3, 8);
+ sseu->max_slices = 3;
+ sseu->max_subslices = 3;
+ sseu->max_eus_per_subslice = 8;
/*
* The subslice disable field is global, i.e. it applies
/* skip disabled slice */
continue;
- intel_sseu_set_subslices(sseu, s, subslice_mask);
+ sseu->subslice_mask[s] = subslice_mask;
for (ss = 0; ss < sseu->max_subslices; ss++) {
u8 eu_disabled_mask;
u32 n_disabled;
- if (!intel_sseu_has_subslice(sseu, s, ss))
+ if (!(sseu->subslice_mask[s] & BIT(ss)))
/* skip disabled subslice */
continue;
eu_disabled_mask =
- eu_disable[s] >>
- (ss * sseu->max_eus_per_subslice);
+ eu_disable[s] >> (ss * sseu->max_eus_per_subslice);
sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u32 fuse1;
int s, ss;
- u32 subslice_mask;
/*
* There isn't a register to tell us how many slices/subslices. We
/* fall through */
case 1:
sseu->slice_mask = BIT(0);
- subslice_mask = BIT(0);
+ sseu->subslice_mask[0] = BIT(0);
break;
case 2:
sseu->slice_mask = BIT(0);
- subslice_mask = BIT(0) | BIT(1);
+ sseu->subslice_mask[0] = BIT(0) | BIT(1);
break;
case 3:
sseu->slice_mask = BIT(0) | BIT(1);
- subslice_mask = BIT(0) | BIT(1);
+ sseu->subslice_mask[0] = BIT(0) | BIT(1);
+ sseu->subslice_mask[1] = BIT(0) | BIT(1);
break;
}
+ sseu->max_slices = hweight8(sseu->slice_mask);
+ sseu->max_subslices = hweight8(sseu->subslice_mask[0]);
+
fuse1 = I915_READ(HSW_PAVP_FUSE1);
switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
default:
sseu->eu_per_subslice = 6;
break;
}
-
- intel_sseu_set_info(sseu, hweight8(sseu->slice_mask),
- hweight8(subslice_mask),
- sseu->eu_per_subslice);
+ sseu->max_eus_per_subslice = sseu->eu_per_subslice;
for (s = 0; s < sseu->max_slices; s++) {
- intel_sseu_set_subslices(sseu, s, subslice_mask);
-
for (ss = 0; ss < sseu->max_subslices; ss++) {
sseu_set_eus(sseu, s, ss,
(1UL << sseu->eu_per_subslice) - 1);