struct skl_hw_state {
struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
- struct skl_ddb_allocation ddb;
struct skl_pipe_wm wm;
} *hw;
- struct skl_ddb_allocation *sw_ddb;
struct skl_pipe_wm *sw_wm;
struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+ u8 hw_enabled_slices;
const enum pipe pipe = crtc->pipe;
int plane, level, max_level = ilk_wm_max_level(dev_priv);
skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
- skl_ddb_get_hw_state(dev_priv, &hw->ddb);
- sw_ddb = &dev_priv->wm.skl_hw.ddb;
+ hw_enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
if (INTEL_GEN(dev_priv) >= 11 &&
- hw->ddb.enabled_slices != sw_ddb->enabled_slices)
+ hw_enabled_slices != dev_priv->enabled_dbuf_slices_num)
drm_err(&dev_priv->drm,
"mismatch in DBUF Slices (expected %u, got %u)\n",
- sw_ddb->enabled_slices,
- hw->ddb.enabled_slices);
+ dev_priv->enabled_dbuf_slices_num,
+ hw_enabled_slices);
/* planes */
for_each_universal_plane(dev_priv, pipe, plane) {
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
- u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
- u8 required_slices = state->wm_results.ddb.enabled_slices;
+ u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_num;
+ u8 required_slices = state->enabled_dbuf_slices_num;
struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
const u8 num_pipes = INTEL_NUM_PIPES(dev_priv);
u8 update_pipes = 0, modeset_pipes = 0;
void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices)
{
- const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
+ const u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_num;
bool ret;
if (req_slices > intel_dbuf_max_slices(dev_priv)) {
ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
if (ret)
- dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
+ dev_priv->enabled_dbuf_slices_num = req_slices;
}
static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
* FIXME: for now pretend that we only have 1 slice, see
* intel_enabled_dbuf_slices_num().
*/
- dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
+ dev_priv->enabled_dbuf_slices_num = 1;
}
static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
* FIXME: for now pretend that the first slice is always
* enabled, see intel_enabled_dbuf_slices_num().
*/
- dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
+ dev_priv->enabled_dbuf_slices_num = 1;
}
static void icl_mbus_init(struct drm_i915_private *dev_priv)
return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
-static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
+u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
{
- u8 enabled_slices;
+ u8 enabled_dbuf_slices_num;
/* Slice 1 will always be enabled */
- enabled_slices = 1;
+ enabled_dbuf_slices_num = 1;
/* Gen prior to GEN11 have only one DBuf slice */
if (INTEL_GEN(dev_priv) < 11)
- return enabled_slices;
+ return enabled_dbuf_slices_num;
/*
* FIXME: for now we'll only ever use 1 slice; pretend that we have
* only that 1 slice enabled until we have a proper way for on-demand
* toggling of the second slice.
*/
if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
- enabled_slices++;
+ enabled_dbuf_slices_num++;
- return enabled_slices;
+ return enabled_dbuf_slices_num;
}
static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state,
const u64 total_data_rate,
- const int num_active,
- struct skl_ddb_allocation *ddb)
+ const int num_active)
{
+ struct drm_atomic_state *state = crtc_state->uapi.state;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
const struct drm_display_mode *adjusted_mode;
u64 total_data_bw;
u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
* - should validate we stay within the hw bandwidth limits
*/
if (0 && (num_active > 1 || total_data_bw >= GBps(12))) {
- ddb->enabled_slices = 2;
+ intel_state->enabled_dbuf_slices_num = 2;
} else {
- ddb->enabled_slices = 1;
+ intel_state->enabled_dbuf_slices_num = 1;
ddb_size /= 2;
}
skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state,
const u64 total_data_rate,
- struct skl_ddb_allocation *ddb,
struct skl_ddb_entry *alloc, /* out */
int *num_active /* out */)
{
*num_active = hweight8(dev_priv->active_pipes);
ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate,
- *num_active, ddb);
+ *num_active);
/*
* If the state doesn't change the active CRTC's or there is no
intel_display_power_put(dev_priv, power_domain, wakeref);
}
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb /* out */)
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv)
{
- ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
+ dev_priv->enabled_dbuf_slices_num =
+ intel_enabled_dbuf_slices_num(dev_priv);
}
}
static int
-skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
- struct skl_ddb_allocation *ddb /* out */)
+skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
{
struct drm_atomic_state *state = crtc_state->uapi.state;
struct drm_crtc *crtc = crtc_state->uapi.crtc;
skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate,
- ddb, alloc, &num_active);
+ alloc, &num_active);
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0)
return 0;
static int
skl_compute_ddb(struct intel_atomic_state *state)
{
- const struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *old_crtc_state;
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int ret, i;
- memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
+ state->enabled_dbuf_slices_num = dev_priv->enabled_dbuf_slices_num;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
+ ret = skl_allocate_pipe_ddb(new_crtc_state);
if (ret)
return ret;
void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
- skl_ddb_get_hw_state(dev_priv, ddb);
+ skl_ddb_get_hw_state(dev_priv);
for_each_intel_crtc(&dev_priv->drm, crtc) {
crtc_state = to_intel_crtc_state(crtc->base.state);
struct intel_crtc;
struct intel_crtc_state;
struct intel_plane;
-struct skl_ddb_allocation;
struct skl_ddb_entry;
struct skl_pipe_wm;
struct skl_wm_level;
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
+u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv);
void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct skl_ddb_entry *ddb_y,
struct skl_ddb_entry *ddb_uv);
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb /* out */);
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv);
void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
struct skl_pipe_wm *out);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
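
The net effect of the change above is that the number of enabled DBuf slices is tracked as a bare u8 on dev_priv (and on the atomic state) instead of inside struct skl_ddb_allocation. Below is a minimal standalone sketch of that before/after access pattern; the fake_* types and helpers are illustrative stand-ins, not the driver's actual structures or functions.

/*
 * Simplified model of the refactor: the slice count moves out of a
 * wrapper struct (the old skl_ddb_allocation) and becomes a plain u8
 * on the "device" and on the "atomic state". All names here are
 * illustrative stand-ins, not real i915 types.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;

struct fake_device {
	/* replaces dev_priv->wm.skl_hw.ddb.enabled_slices */
	u8 enabled_dbuf_slices_num;
};

struct fake_atomic_state {
	/* replaces state->wm_results.ddb.enabled_slices */
	u8 enabled_dbuf_slices_num;
};

/* stands in for intel_enabled_dbuf_slices_num(): read back what the HW has on */
static u8 fake_enabled_dbuf_slices_num(const struct fake_device *dev)
{
	(void)dev;
	/* slice 1 is always enabled; pretend slice 2 stays off for now */
	return 1;
}

/* stands in for skl_ddb_get_hw_state(): cache the HW readout on the device */
static void fake_ddb_get_hw_state(struct fake_device *dev)
{
	dev->enabled_dbuf_slices_num = fake_enabled_dbuf_slices_num(dev);
}

/* stands in for the verify path: compare the SW expectation against HW readout */
static void fake_verify_dbuf_slices(const struct fake_device *dev)
{
	u8 hw = fake_enabled_dbuf_slices_num(dev);

	if (hw != dev->enabled_dbuf_slices_num)
		fprintf(stderr, "mismatch in DBUF Slices (expected %u, got %u)\n",
			dev->enabled_dbuf_slices_num, hw);
}

int main(void)
{
	struct fake_device dev = {0};
	struct fake_atomic_state state = {0};

	fake_ddb_get_hw_state(&dev);

	/* like skl_compute_ddb(): start the state from the currently enabled slices */
	state.enabled_dbuf_slices_num = dev.enabled_dbuf_slices_num;

	fake_verify_dbuf_slices(&dev);
	printf("slices: hw=%u state=%u\n",
	       dev.enabled_dbuf_slices_num, state.enabled_dbuf_slices_num);
	return 0;
}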