#include "intel_bw.h"
#include "intel_display_types.h"
#include "intel_sideband.h"
+#include "intel_atomic.h"
+#include "intel_pm.h"
+
/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
return 0;
}
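+/*
+ * Ask PCode to restrict a set of QGV points: each bit set in @points_mask
+ * marks one QGV point that should no longer be used.
+ */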
+int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
+ u32 points_mask)
+{
+ int ret;
+
+ /* bspec says to keep retrying for at least 1 ms */
+ ret = skl_pcode_request(dev_priv, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
+ points_mask,
+ ICL_PCODE_POINTS_RESTRICTED_MASK,
+ ICL_PCODE_POINTS_RESTRICTED,
+ 1);
+
+ if (ret < 0) {
+ drm_err(&dev_priv->drm, "Failed to disable qgv points (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
struct intel_qgv_info *qi)
{
break;
}
+ /*
+ * If SAGV is disabled in BIOS, we always get only one QGV
+ * point, and there is no point in sending PCode commands to
+ * restrict it since they would fail anyway.
+ */
+ if (qi->num_points == 1)
+ dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
+ else
+ dev_priv->sagv_status = I915_SAGV_ENABLED;
+
return 0;
}
{
int i;
+ /*
+ * Treat 0 planes as 1 so that we still return the maximum
+ * available bandwidth for an empty configuration.
+ */
+ num_planes = max(1, num_planes);
+
for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
const struct intel_bw_info *bi =
&dev_priv->max_bw[i];
icl_get_bw_info(dev_priv, &icl_sa_info);
}
-static unsigned int intel_max_data_rate(struct drm_i915_private *dev_priv,
- int num_planes)
-{
- if (INTEL_GEN(dev_priv) >= 11) {
- /*
- * Any bw group has same amount of QGV points
- */
- const struct intel_bw_info *bi =
- &dev_priv->max_bw[0];
- unsigned int min_bw = UINT_MAX;
- int i;
-
- /*
- * FIXME with SAGV disabled maybe we can assume
- * point 1 will always be used? Seems to match
- * the behaviour observed in the wild.
- */
- for (i = 0; i < bi->num_qgv_points; i++) {
- unsigned int bw = icl_max_bw(dev_priv, num_planes, i);
-
- min_bw = min(bw, min_bw);
- }
- return min_bw;
- } else {
- return UINT_MAX;
- }
-}
-
static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
{
/*
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_bw_state *new_bw_state = NULL;
- unsigned int data_rate, max_data_rate;
+ const struct intel_bw_state *old_bw_state = NULL;
+ unsigned int data_rate;
unsigned int num_active_planes;
struct intel_crtc *crtc;
int i, ret;
+ u32 allowed_points = 0;
+ unsigned int max_bw_point = 0, max_bw = 0;
+ unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
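+ /* one bit per available QGV point, e.g. 3 points -> mask == 0b111 */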
+ u32 mask = (1 << num_qgv_points) - 1;
/* FIXME earlier gens need some checks too */
if (INTEL_GEN(dev_priv) < 11)
return ret;
data_rate = intel_bw_data_rate(dev_priv, new_bw_state);
+ data_rate = DIV_ROUND_UP(data_rate, 1000);
+
num_active_planes = intel_bw_num_active_planes(dev_priv, new_bw_state);
- max_data_rate = intel_max_data_rate(dev_priv, num_active_planes);
+ for (i = 0; i < num_qgv_points; i++) {
+ unsigned int max_data_rate;
- data_rate = DIV_ROUND_UP(data_rate, 1000);
+ max_data_rate = icl_max_bw(dev_priv, num_active_planes, i);
+ /*
+ * We need to know which qgv point gives us
+ * maximum bandwidth in order to disable SAGV
+ * if we find that we exceed SAGV block time
+ * with watermarks. By that point the watermarks
+ * have already been calculated earlier in
+ * intel_atomic_check().
+ */
+ if (max_data_rate > max_bw) {
+ max_bw_point = i;
+ max_bw = max_data_rate;
+ }
+ if (max_data_rate >= data_rate)
+ allowed_points |= BIT(i);
+ drm_dbg_kms(&dev_priv->drm, "QGV point %d: max bw %d required %d\n",
+ i, max_data_rate, data_rate);
+ }
- if (data_rate > max_data_rate) {
- drm_dbg_kms(&dev_priv->drm,
- "Bandwidth %u MB/s exceeds max available %d MB/s (%d active planes)\n",
- data_rate, max_data_rate, num_active_planes);
+ /*
+ * BSpec states that we should always have at least one allowed
+ * QGV point left, so reject the configuration if none remains.
+ */
+ if (allowed_points == 0) {
+ drm_dbg_kms(&dev_priv->drm, "No QGV points provide sufficient memory"
+ " bandwidth %d for display configuration (%d active planes).\n",
+ data_rate, num_active_planes);
return -EINVAL;
}
+ /*
+ * If we can't enable SAGV due to the increased memory latency it
+ * may cause, leave only the single QGV point with the highest
+ * bandwidth.
+ */
+ if (!intel_can_enable_sagv(dev_priv, new_bw_state)) {
+ allowed_points = BIT(max_bw_point);
+ drm_dbg_kms(&dev_priv->drm, "No SAGV, using single QGV point %d\n",
+ max_bw_point);
+ }
+ /*
+ * We store the ones which need to be masked as that is what PCode
+ * actually accepts as a parameter.
+ */
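+ /*
+ * E.g. with 3 QGV points, if only points 0 and 2 provide enough
+ * bandwidth, allowed_points == 0b101 and we store
+ * ~0b101 & 0b111 == 0b010, i.e. only point 1 gets restricted.
+ */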
+ new_bw_state->qgv_points_mask = ~allowed_points & mask;
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+ /*
+ * If the mask has changed, make sure that the commits are
+ * serialized (in case this is a non-modeset, non-blocking commit).
+ */
+ if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
+ ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_bw_state *new_bw_state;
+ const struct intel_bw_state *old_bw_state;
+ u32 new_mask = 0;
/*
* Just return if we can't control SAGV or don't have it.
if (!new_bw_state)
return;
- if (!intel_can_enable_sagv(dev_priv, new_bw_state))
+ if (INTEL_GEN(dev_priv) < 11 && !intel_can_enable_sagv(dev_priv, new_bw_state)) {
intel_disable_sagv(dev_priv);
+ return;
+ }
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+ /*
+ * Nothing to mask
+ */
+ if (new_bw_state->qgv_points_mask == old_bw_state->qgv_points_mask)
+ return;
+
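+ /*
+ * Mask the union of the old and new restrictions here, so that both
+ * the old and the new configuration stay on allowed points while the
+ * update is in flight; the post-update step relaxes this to the new
+ * mask only.
+ */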
+ new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
+
+ /*
+ * If the new mask is zero there is nothing to mask here; any
+ * unmasking is handled in intel_sagv_post_plane_update().
+ */
+ if (!new_mask)
+ return;
+
+ /*
+ * Restrict required qgv points before updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
+ */
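+ /* a failure here is only logged inside icl_pcode_restrict_qgv_points() */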
+ icl_pcode_restrict_qgv_points(dev_priv, new_mask);
}
void intel_sagv_post_plane_update(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_bw_state *new_bw_state;
+ const struct intel_bw_state *old_bw_state;
+ u32 new_mask = 0;
/*
* Just return if we can't control SAGV or don't have it.
if (!new_bw_state)
return;
- if (intel_can_enable_sagv(dev_priv, new_bw_state))
+ if (INTEL_GEN(dev_priv) < 11 && intel_can_enable_sagv(dev_priv, new_bw_state)) {
intel_enable_sagv(dev_priv);
+ return;
+ }
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+ /*
+ * Nothing to unmask
+ */
+ if (new_bw_state->qgv_points_mask == old_bw_state->qgv_points_mask)
+ return;
+
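+ /*
+ * Keep only the points required by the new configuration masked;
+ * everything else gets unmasked again by this PCode request.
+ */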
+ new_mask = new_bw_state->qgv_points_mask;
+
+ /*
+ * Allow required qgv points after updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
+ */
+ icl_pcode_restrict_qgv_points(dev_priv, new_mask);
}
static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)